// Package logmower — sender.go ships batched log records to MongoDB.
package logmower
import (
"context"
"log"
"time"
ms "git.k-space.ee/k-space/logmower-shipper/pkg/mongo_struct"
"github.com/jtagcat/util"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.mongodb.org/mongo-driver/mongo"
)
var (
promShipperQueued = promauto.NewGaugeVec(prom.GaugeOpts{
2022-11-06 15:02:49 +00:00
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "shipper_record", // "queued",
Help: "Log records in queue to be batched and sent to database",
2022-11-04 08:47:45 +00:00
}, []string{"filename"})
promShipperDbSent = promauto.NewCounterVec(prom.CounterOpts{
2022-11-06 15:02:49 +00:00
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "record", // "sent",
Help: "Log records successfully committed to database",
2022-11-04 08:47:45 +00:00
}, []string{"filename"})
promShipperBatchSizeResult = promauto.NewHistogram(prom.HistogramOpts{
2022-11-06 15:02:49 +00:00
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "bulk_submission_message", // "items_in_batch"
Help: "Batch size for database submissions",
Buckets: []float64{1, 5, 10, 50, 100, 500, 1000, 5000, 10000},
})
2022-11-06 23:23:20 +00:00
promShipperDbSendError = promauto.NewCounterVec(prom.CounterOpts{
2022-11-06 15:02:49 +00:00
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "insertion_error", // "errors",
Help: "Errors while submitting to database", // TODO:
2022-11-04 08:47:45 +00:00
}, []string{"filename"})
promShipperSynced = promauto.NewGaugeVec(prom.GaugeOpts{
2022-11-06 15:02:49 +00:00
Namespace: PrometheusPrefix,
2022-11-04 08:47:45 +00:00
Subsystem: "shipper",
Name: "batches_synced",
Help: "All batches available have been committed database (0 or 1)",
2022-11-04 08:47:45 +00:00
}, []string{"filename"})
)
// Batching limits: a batch is submitted once it reaches MaxBatchItems
// records or MaxBatchTime has elapsed, whichever comes first.
const (
MaxBatchItems = 10000 // maximum records per database submission
MaxBatchTime = 5 * time.Second // maximum wait before flushing a partial batch
)
// queueT is a receive-only stream of log records pending shipment to the database.
type queueT <-chan ms.Record
func (queue queueT) sender(db *mongo.Collection, metricsFilename string) {
batched := make(chan []ms.Record)
// batcher and queue metrics
go func() {
2022-11-04 08:47:45 +00:00
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
for {
promShipperQueued.WithLabelValues(metricsFilename).Set(float64(
len(queue)))
2022-11-04 08:47:45 +00:00
timer := time.NewTimer(time.Second)
select {
case <-ctx.Done():
return
case <-timer.C:
}
}
}()
util.Batch(MaxBatchItems, MaxBatchTime, queue, batched)
// returns when sendQueue is closed
}()
for {
promShipperSynced.WithLabelValues(metricsFilename).Set(1)
2022-11-04 08:47:45 +00:00
batch, ok := <-batched
if !ok {
return
}
2022-11-06 23:23:20 +00:00
promShipperBatchSizeResult.Observe(float64(len(batch)))
promShipperSynced.WithLabelValues(metricsFilename).Set(0)
2022-11-04 08:47:45 +00:00
var batchBson []interface{} // mongo does not like typing
for _, b := range batch {
batchBson = append(batchBson, b.ToBson())
}
result, err := db.InsertMany(mongoTimeoutCtx(context.Background()), batchBson, nil)
if err != nil {
promShipperDbSendError.WithLabelValues(metricsFilename).Add(1)
log.Printf("failure in batch submit to database: %e", err) // TODO: add some selective retry here or something, better error handling
2022-11-06 20:24:33 +00:00
continue
}
2022-11-06 20:24:33 +00:00
promShipperDbSent.WithLabelValues(metricsFilename).Add(float64(
2022-11-06 20:24:33 +00:00
len(result.InsertedIDs)))
}
}