package logmower

import (
	"context"
	"path/filepath"
	"time"

	"github.com/jtagcat/util"
	prom "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"go.mongodb.org/mongo-driver/bson"
	"go.uber.org/zap"
)
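
// Prometheus metrics for the shipper, all labelled by source log file name.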
var (
	promShipperMongoSent = promauto.NewCounterVec(prom.CounterOpts{
		Subsystem: "shipper",
		Name:      "sent_count",
		Help:      "Items successfully committed to mongo",
	}, []string{"filename"})
	promShipperMongoSentError = promauto.NewCounterVec(prom.CounterOpts{
		Subsystem: "shipper",
		Name:      "mongo_errors_count",
		Help:      "Errors while submitting to mongo", // TODO:
	}, []string{"filename"})
	promShipperDropped = promauto.NewCounterVec(prom.CounterOpts{
		Subsystem: "shipper",
		Name:      "queue_dropped_count",
		Help:      "Items ready to be batched and sent to mongo, but dropped due to full queue",
	}, []string{"filename"})
	promLineParsingErr = promauto.NewCounterVec(prom.CounterOpts{
		Subsystem: "shipper",
		Name:      "lines_parsing_err_count",
		Help:      "Errors while parsing log line suffixes",
	}, []string{"filename"})
	promShipperQueueItems = promauto.NewHistogramVec(prom.HistogramOpts{
		Subsystem: "shipper",
		Name:      "queue_items",
		Help:      "Items in queue to be batched and sent to mongo",
	}, []string{"filename"})
	promShipperSynced = promauto.NewGaugeVec(prom.GaugeOpts{
		Subsystem: "shipper",
		Name:      "batches_synced",
		Help:      "All batches available have been sent to mongo",
	}, []string{"filename"})
)
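
// Caps for batching the send queue (used with util.Batch): a batch is
// flushed at MaxBatchItems entries or after MaxBatchTime, whichever
// comes first. Both are also exported as gauges in init().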
const (
	MaxBatchItems = 100
	MaxBatchTime  = time.Second
)
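
// init registers gauges that expose the configured queue and batching caps.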
func init() {
	promauto.NewGaugeFunc(prom.GaugeOpts{
		Subsystem: "shipper",
		Name:      "queue_size",
		Help:      "Submit queue size cap",
	}, func() float64 {
		return float64(SendQueueLimit)
	})
	promauto.NewGaugeFunc(prom.GaugeOpts{
		Subsystem: "shipper",
		Name:      "batch_size",
		Help:      "Batching size cap",
	}, func() float64 {
		return float64(MaxBatchItems)
	})
	promauto.NewGaugeFunc(prom.GaugeOpts{
		Subsystem: "shipper",
		Name:      "batch_time",
		Help:      "Batching delay cap",
	}, func() float64 {
		return float64(MaxBatchTime) // nanoseconds
	})
}
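
// sender starts batching items from sendQueue and shipping the batches to
// mongo in the background. The returned synced() reports whether everything
// queued so far has been flushed.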
func (s *submitter) sender(name string, sendQueue <-chan mLog) (synced func() bool) {
	batched := make(chan []mLog)

	go func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		// sample queue depth once a second until batching stops
		go func() {
			baseName := filepath.Base(name)
			for {
				promShipperQueueItems.WithLabelValues(baseName).Observe(float64(
					len(sendQueue)))

				timer := time.NewTimer(time.Second)
				select {
				case <-ctx.Done():
					return
				case <-timer.C:
				}
			}
		}()

		util.Batch(MaxBatchItems, MaxBatchTime, sendQueue, batched)
	}()

	var batchSynced bool
	s.Add(1)
	go s.senderRoutine(name, batched, &batchSynced)

	return func() bool {
		return batchSynced && len(sendQueue) == 0
	}
}
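
// senderRoutine inserts each ready batch into mongo and keeps the per-file
// sync state and metrics up to date.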
func (s *submitter) senderRoutine(name string, batched <-chan []mLog, synced *bool) {
	defer s.Done()

	baseName := filepath.Base(name)

	for {
		*synced = true
		promShipperSynced.WithLabelValues(baseName).Set(1)

		batch, ok := <-batched
		if !ok {
			return
		}

		*synced = false
		promShipperSynced.WithLabelValues(baseName).Set(0)

		var batchBson []interface{} // mongo does not like typing
		for _, b := range batch {
			batchBson = append(batchBson, b.toBson())
		}

		result, err := s.db.InsertMany(mongoTimeoutCtx(context.Background()), batchBson, nil)
		if err != nil {
			s.l.Error("mongo send returned error; TODO: add some selective retry here or something", zap.Error(err)) // TODO:
		}

		// result may be nil on a failed insert; count only what actually landed
		if result != nil {
			promShipperMongoSent.WithLabelValues(baseName).Add(float64(
				len(result.InsertedIDs)))
		}
	}
}
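
// mLog is a single log record in the shape shipped to mongo.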
// when editing, also edit toBson(); all bson.D (and bson.M) uses
type mLog struct {
	HostInfo    HostInfo
	File        string
	Offset      int64  // byte offset where the log entry ends
	Content     string // TODO:
	ShipTime    time.Time
	CollectTime time.Time
	StdErr      bool
	Format      string // CRI log tag: "F" marks a full line, "P" a partial line continued by the next entry
}
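
// toBson converts the record into the document inserted into mongo.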
// not using marshal, since it is <0.1x the performance of building the document by hand
func (l *mLog) toBson() bson.M {
	return bson.M{
		"host_info": bson.M{
			"id":   l.HostInfo.id,
			"name": l.HostInfo.name,
			"arch": l.HostInfo.arch,
		},
		"filename":       l.File,
		"offset":         l.Offset,
		"content":        l.Content,
		"ship_time":      l.ShipTime,
		"container_time": l.CollectTime,
		"stderr":         l.StdErr,
		"format":         l.Format,
	}
}