// logmower-shipper/cmd/sender.go

package logmower
import (
"context"
"time"
"github.com/jtagcat/util"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.mongodb.org/mongo-driver/bson"
"go.uber.org/zap"
)
var (
2022-11-04 08:47:45 +00:00
promShipperMongoSent = promauto.NewCounterVec(prom.CounterOpts{
Subsystem: "shipper",
Name: "sent_count",
2022-11-04 08:47:45 +00:00
Help: "Items successfully committed to mongo",
}, []string{"filename"})
promShipperMongoSentError = promauto.NewCounterVec(prom.CounterOpts{
Subsystem: "shipper",
2022-11-04 08:47:45 +00:00
Name: "mongo_errors_count",
Help: "Errors while submitting to mongo", // TODO:
2022-11-04 08:47:45 +00:00
}, []string{"filename"})
promShipperDropped = promauto.NewCounterVec(prom.CounterOpts{
Subsystem: "shipper",
2022-11-04 08:47:45 +00:00
Name: "queue_dropped_count",
Help: "Items ready to be batched and sent to mongo, but dropped due to full queue",
}, []string{"filename"})
2022-11-04 08:47:45 +00:00
promShipperQueueItems = promauto.NewHistogramVec(prom.HistogramOpts{
Subsystem: "shipper",
Name: "queue_items",
Help: "Items in queue to be batched and sent to mongo",
}, []string{"filename"})
promShipperSynced = promauto.NewGaugeVec(prom.GaugeOpts{
Subsystem: "shipper",
Name: "batches_synced",
Help: "All batches available have been sent to mongo",
}, []string{"filename"})
)
// Tunables for batching records before submission to mongo.
const (
	// SendQueueLimit caps the per-file send queue feeding the batcher.
	SendQueueLimit = 1024
	// MaxBatchItems caps how many records go into a single InsertMany.
	MaxBatchItems = 100
	// MaxBatchTime caps how long a partial batch may wait before being sent.
	MaxBatchTime = time.Second
)
func init() {
promauto.NewGaugeFunc(prom.GaugeOpts{
Subsystem: "shipper",
Name: "queue_size",
2022-11-04 08:47:45 +00:00
Help: "Submit queue size cap",
}, func() float64 {
return float64(SendQueueLimit)
})
promauto.NewGaugeFunc(prom.GaugeOpts{
Subsystem: "shipper",
2022-11-04 08:47:45 +00:00
Name: "batch_size",
Help: "batching size cap",
}, func() float64 {
2022-11-04 08:47:45 +00:00
return float64(MaxBatchItems)
})
2022-11-04 08:47:45 +00:00
promauto.NewGaugeFunc(prom.GaugeOpts{
Subsystem: "shipper",
Name: "batch_time",
Help: "batching delay cap",
}, func() float64 {
return float64(MaxBatchTime)
})
}
2022-11-04 08:47:45 +00:00
func (s *submitter) sender(name string, sendQueue <-chan mLog) (synced func() bool) {
batched := make(chan []mLog)
go func() {
2022-11-04 08:47:45 +00:00
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
for {
promShipperQueueItems.WithLabelValues(name).Observe(float64(
len(sendQueue)))
timer := time.NewTimer(time.Second)
select {
case <-ctx.Done():
return
case <-timer.C:
}
}
}()
util.Batch(MaxBatchItems, MaxBatchTime, sendQueue, batched)
}()
2022-11-04 08:47:45 +00:00
var batchSynced bool
s.Add(1)
go s.senderRoutine(name, batched, &batchSynced)
return func() bool {
return batchSynced && len(sendQueue) == 0
}
}
func (s *submitter) senderRoutine(name string, batched <-chan []mLog, synced *bool) {
defer s.Done()
for {
2022-11-04 08:47:45 +00:00
*synced = true
promShipperSynced.WithLabelValues(name).Set(1)
batch, ok := <-batched
if !ok {
return
}
2022-11-04 08:47:45 +00:00
*synced = false
promShipperSynced.WithLabelValues(name).Set(0)
var batchBson []interface{} // mongo does not like typing
for _, b := range batch {
batchBson = append(batchBson, b.toBson())
}
2022-11-04 08:47:45 +00:00
result, err := s.db.InsertMany(mongoTimeoutCtx(context.Background()), batchBson, nil)
promShipperMongoSent.WithLabelValues(name).Add(float64(
len(result.InsertedIDs)))
if err != nil {
s.l.Error("mongo send returned error; TODO: add some selective retry here or something", zap.Error(err)) // TODO:
}
}
}
// mLog is a single log record as shipped to mongo.
// When editing, also edit toBson(); all bson.D (and bson.M) uses.
type mLog struct {
	HostInfo HostInfo // identifying info for the shipping host — see HostInfo definition for fields
	Filename string   // source log file this record was read from
	Offset int64 // byte offset where log entry ends at
	Content string // TODO:
	Time time.Time // record timestamp — presumably parsed from the log line; verify at the producer
}
// toBson renders the record as a bson.M document for InsertMany.
// Hand-built rather than marshalled, since marshal is <0.1x performance.
func (l *mLog) toBson() bson.M {
	doc := make(bson.M, 5)
	doc["host_info"] = l.HostInfo
	doc["filename"] = l.Filename
	doc["offset"] = l.Offset
	doc["content"] = l.Content
	doc["time"] = l.Time
	return doc
}