// logmower-shipper/cmd/sender.go
package logmower
import (
"context"
"path/filepath"
"time"
"github.com/jtagcat/util"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.mongodb.org/mongo-driver/bson"
"go.uber.org/zap"
)
// Per-file shipper metrics, all labeled by (base) filename.
var (
// promShipperMongoSent counts documents acknowledged by mongo (InsertedIDs).
promShipperMongoSent = promauto.NewCounterVec(prom.CounterOpts{
Subsystem: "shipper",
Name: "sent_count",
Help: "Items successfully committed to mongo",
}, []string{"filename"})
// promShipperMongoSentError counts failed InsertMany calls.
promShipperMongoSentError = promauto.NewCounterVec(prom.CounterOpts{
Subsystem: "shipper",
Name: "mongo_errors_count",
Help: "Errors while submitting to mongo", // TODO:
}, []string{"filename"})
// promShipperDropped counts items discarded because the submit queue was full.
promShipperDropped = promauto.NewCounterVec(prom.CounterOpts{
Subsystem: "shipper",
Name: "queue_dropped_count",
Help: "Items ready to be batched and sent to mongo, but dropped due to full queue",
}, []string{"filename"})
// promLineParsingErr counts log lines whose suffix could not be parsed.
promLineParsingErr = promauto.NewCounterVec(prom.CounterOpts{
Subsystem: "shipper",
Name: "lines_parsing_err_count",
Help: "Errors while parsing log line suffixes",
}, []string{"filename"})
// promShipperQueueItems samples the send-queue depth (observed ~once/second
// in sender's monitoring goroutine).
promShipperQueueItems = promauto.NewHistogramVec(prom.HistogramOpts{
Subsystem: "shipper",
Name: "queue_items",
Help: "Items in queue to be batched and sent to mongo",
}, []string{"filename"})
// promShipperSynced is 1 while a file has no batch in flight, 0 otherwise.
promShipperSynced = promauto.NewGaugeVec(prom.GaugeOpts{
Subsystem: "shipper",
Name: "batches_synced",
Help: "All batches available have been sent to mongo",
}, []string{"filename"})
)
// Batching caps: a batch is flushed to mongo when it reaches MaxBatchItems
// items or MaxBatchTime has elapsed, whichever happens first (see util.Batch).
const (
MaxBatchItems = 100
MaxBatchTime = time.Second
)
// init registers static configuration gauges so the shipper's queue and
// batching caps are visible next to its runtime metrics.
func init() {
	promauto.NewGaugeFunc(prom.GaugeOpts{
		Subsystem: "shipper",
		Name:      "queue_size",
		Help:      "Submit queue size cap",
	}, func() float64 {
		return float64(SendQueueLimit)
	})
	promauto.NewGaugeFunc(prom.GaugeOpts{
		Subsystem: "shipper",
		Name:      "batch_size",
		Help:      "batching size cap",
	}, func() float64 {
		return float64(MaxBatchItems)
	})
	promauto.NewGaugeFunc(prom.GaugeOpts{
		Subsystem: "shipper",
		Name:      "batch_time",
		Help:      "batching delay cap (seconds)",
	}, func() float64 {
		// Export seconds per Prometheus base-unit convention;
		// float64(MaxBatchTime) would be raw nanoseconds.
		return MaxBatchTime.Seconds()
	})
}
// sender wires up the per-file send pipeline: items from sendQueue are
// grouped into batches of at most MaxBatchItems (flushed at least every
// MaxBatchTime) and handed to senderRoutine for submission to mongo.
// The returned closure reports whether everything seen so far has been
// flushed (no batch in flight and nothing waiting in the queue).
//
// NOTE(review): batchSynced is written by senderRoutine and read by the
// returned closure without synchronization — a data race under the Go memory
// model; consider sync/atomic.Bool. Left unchanged here to keep
// senderRoutine's signature intact.
func (s *submitter) sender(name string, sendQueue <-chan mLog) (synced func() bool) {
	batched := make(chan []mLog)

	go func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		// Sample queue depth roughly once per second while batching runs.
		go func() {
			baseName := filepath.Base(name)

			// One reusable ticker instead of allocating a fresh
			// time.Timer (never Stop()ed) on every iteration.
			ticker := time.NewTicker(time.Second)
			defer ticker.Stop()

			for {
				promShipperQueueItems.WithLabelValues(baseName).Observe(float64(
					len(sendQueue)))

				select {
				case <-ctx.Done():
					return
				case <-ticker.C:
				}
			}
		}()

		// Blocks until sendQueue is closed and drained; cancel() then
		// stops the sampling goroutine above.
		util.Batch(MaxBatchItems, MaxBatchTime, sendQueue, batched)
	}()

	var batchSynced bool
	s.Add(1)
	go s.senderRoutine(name, batched, &batchSynced)

	return func() bool {
		return batchSynced && len(sendQueue) == 0
	}
}
// senderRoutine drains batches and inserts each one into mongo, keeping the
// per-file sync gauge and sent/error counters up to date. It returns when
// batched is closed, marking the submitter's WaitGroup done.
func (s *submitter) senderRoutine(name string, batched <-chan []mLog, synced *bool) {
	defer s.Done()
	baseName := filepath.Base(name)

	for {
		*synced = true
		promShipperSynced.WithLabelValues(baseName).Set(1)

		batch, ok := <-batched
		if !ok {
			return
		}

		*synced = false
		promShipperSynced.WithLabelValues(baseName).Set(0)

		var batchBson []interface{} // mongo does not like typing
		for _, b := range batch {
			batchBson = append(batchBson, b.toBson())
		}

		result, err := s.db.InsertMany(mongoTimeoutCtx(context.Background()), batchBson, nil)
		if err != nil {
			promShipperMongoSentError.WithLabelValues(baseName).Add(1)
			s.l.Error("mongo send returned error; TODO: add some selective retry here or something", zap.Error(err)) // TODO:
		}
		// result may be nil when err != nil — the original dereferenced it
		// unconditionally, panicking on any failed insert. Guarding still
		// counts partial successes when the driver reports them.
		// Use baseName for label consistency with every other metric here.
		if result != nil {
			promShipperMongoSent.WithLabelValues(baseName).Add(float64(
				len(result.InsertedIDs)))
		}
	}
}
// when editing, also edit toBson(); all bson.D (and bson.M) uses
//
// mLog is one log record in the shape shipped to mongo; each field maps to a
// bson key emitted by toBson().
type mLog struct {
HostInfo HostInfo // stored under "host_info" as {id, name, arch}
File string // full path of the source log file; stored as "filename"
Offset int64 // byte offset where log entry ends at
Content string // TODO:
ShipTime time.Time // stored as "ship_time"
CollectTime time.Time // stored as "container_time" — NOTE(review): field/key naming mismatch, confirm intended semantics
StdErr bool // stored as "stderr"
Format string // F or P TODO: what does it mean? Is there a well-defined log format for cri-o?
}
// not using marshal, since it is <0.1x performance

// toBson flattens the record into the document shape stored in mongo.
// Key names must stay in sync with mLog and all other bson.D/bson.M uses.
func (l *mLog) toBson() bson.M {
	host := bson.M{
		"id":   l.HostInfo.id,
		"name": l.HostInfo.name,
		"arch": l.HostInfo.arch,
	}

	return bson.M{
		"host_info":      host,
		"filename":       l.File,
		"offset":         l.Offset,
		"content":        l.Content,
		"ship_time":      l.ShipTime,
		"container_time": l.CollectTime,
		"stderr":         l.StdErr,
		"format":         l.Format,
	}
}