package logmower

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/jtagcat/util"
	prom "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
	"go.uber.org/zap"
	"k8s.io/apimachinery/pkg/util/wait"
)

// Per-file shipping metrics, all labeled by the file's basename.
var (
	// Bytes skipped at the start of a file because a previously stored
	// offset was found in the database (resume point).
	promFileInitialSeekSkipped = promauto.NewGaugeVec(prom.GaugeOpts{
		Namespace: PrometheusPrefix,
		// Subsystem: "file",
		Name: "skipped_bytes",
		Help: "Bytes skipped in file after discovering",
	}, []string{"filename"})
	// Set to 1 once a file's tail has passed the size the file had when
	// shipping started (initial backlog fully sent), 0 until then.
	promFileCatchupDone = promauto.NewGaugeVec(prom.GaugeOpts{
		Namespace: PrometheusPrefix,
		Subsystem: "file",
		Name:      "catchupped",
		Help:      "(0 or) 1 if initial backlog has been sent; (total <= watcher_file_count)",
	}, []string{"filename"}) // TODO: rm filename?
	// Count of errors encountered while shipping a file; incremented once
	// per failed shipFileRoutine attempt before the retry backoff.
	promFileErr = promauto.NewCounterVec(prom.CounterOpts{
		Namespace: PrometheusPrefix,
		Subsystem: "file",
		Name:      "errors_count",
		Help:      "Errors while reading file",
	}, []string{"filename"})
	// Histogram of raw log line sizes, observed before empty lines are
	// filtered out.
	promFileLineSize = promauto.NewHistogramVec(prom.HistogramOpts{
		Namespace: PrometheusPrefix,
		// Subsystem: "file",
		Name:    "line_size_bytes",
		Help:    "Log line size in bytes",
		Buckets: []float64{80, 160, 320, 640, 1280},
	}, []string{"filename"})
)

type (
	// submitter ships log lines from local files into the MongoDB
	// collection db, tagging each record with this host's hostInfo.
	// NOTE(review): the embedded WaitGroup is not used within this chunk;
	// presumably callers elsewhere use it to wait for shippers — confirm.
	submitter struct {
		l        *zap.Logger
		hostInfo HostInfo
		db       *mongo.Collection
		sync.WaitGroup
	}
)

// SendQueueLimit is the buffer size of the parsed-record channel between
// parseLines and sender; shipFileRoutine waits for this queue to drain
// before (re)reading the resume offset from the database.
const SendQueueLimit = 1024

// shipFile tails the file at name and ships its lines until ctx is
// cancelled or the tail ends, retrying shipFileRoutine indefinitely with
// exponential backoff on error. It wires up two pipeline goroutines:
// parseLines (rawLine -> mLog, limited to recordLimitBytes per record)
// and sender (mLog -> database).
//
// TODO: caller may call duplicate shipFile of same name on file replace; sends might not work properly
func (s *submitter) shipFile(ctx context.Context, name string, recordLimitBytes int) {
	baseName := filepath.Base(name)

	// Unbuffered: shipFileRoutine's sends block until parseLines receives.
	lineChan := make(chan rawLine)
	defer close(lineChan)

	// NOTE(review): sendChan is closed here, but parseLines is the goroutine
	// that sends on it — closing on the receiver side risks a send-on-closed
	// panic if parseLines is still draining lineChan; verify parseLines'
	// shutdown ordering.
	sendChan := make(chan mLog, SendQueueLimit)
	defer close(sendChan)

	go s.parseLines(recordLimitBytes, lineChan, sendChan)
	go s.sender(name, sendChan)

	// Retry forever (condition func never returns an error); done only when
	// shipFileRoutine returns nil, i.e. the tail ended cleanly.
	// TODO: better way to kill or wait for sendQueue before retrying (or duplicates?)
	wait.ManagedExponentialBackoffWithContext(ctx, defaultBackoff(), func() (done bool, _ error) {
		err := s.shipFileRoutine(ctx, name, lineChan)
		if err == nil {
			return true, nil
		}

		promFileErr.WithLabelValues(baseName).Add(1)
		s.l.Error("shipping file", zap.String("filename", name), zap.Error(err))
		return false, nil // nil since we want to loop and keep retrying indefinitely
	})
}

// shipFileRoutine performs one shipping attempt for name: it waits for the
// in-flight queue to drain, looks up the largest stored offset for this
// host+file in the database, then tails the file from that offset, pushing
// each non-empty line (with its end offset, for resume) onto sendQueue.
// Returns nil when the tail's line channel closes; any error is retried by
// the caller.
func (s *submitter) shipFileRoutine(ctx context.Context, name string, sendQueue chan<- rawLine) error {
	baseName := filepath.Base(name)

	// Busy-wait until the previous attempt's queued records have been
	// consumed, so the offset read below reflects everything already sent.
	// TODO: better way for respecting ?killing sender for retry
	for {
		if len(sendQueue) == 0 {
			break
		}
		time.Sleep(time.Second)
	}

	// get files with offset: fetch the record with the largest offset for
	// this host + file basename; ErrNoDocuments means a fresh file.
	offsetResult, err := mongoWithErr(s.db.FindOne(mongoTimeoutCtx(ctx),
		bson.D{{Key: mongoKeyHostId, Value: s.hostInfo.id}, {Key: mongoKeyFileBasename, Value: baseName}},
		&mongoOpt.FindOneOptions{Sort: bson.D{{Key: mongoKeyOffset, Value: -1}}}, // sort descending (get largest)
	))
	if err != nil && !errors.Is(err, mongo.ErrNoDocuments) {
		return fmt.Errorf("retrieving offset from database: %w", err)
	}

	// On ErrNoDocuments log stays zero-valued, so tailing starts at offset 0.
	var log mLog
	if err := offsetResult.Decode(&log); err != nil && !errors.Is(err, mongo.ErrNoDocuments) {
		return fmt.Errorf("decoding offset from database: %w", err)
	}

	// Snapshot the current size: once the tail passes it, the initial
	// backlog is considered caught up.
	fi, err := os.Stat(name)
	if err != nil {
		return fmt.Errorf("getting original file size: %w", err)
	}
	startSize := fi.Size()

	// Child context so the tail is cancelled when this attempt returns.
	sctx, cancel := context.WithCancel(ctx)
	defer cancel()

	promFileInitialSeekSkipped.WithLabelValues(baseName).Set(float64(log.Offset))

	lineChan, errChan, err := util.TailFile(sctx, name, log.Offset, io.SeekStart)
	if err != nil {
		return fmt.Errorf("tailing file: %w", err)
	}

	var catchUpped bool // cache: avoid re-setting the gauge on every line
	promFileCatchupDone.WithLabelValues(baseName).Set(0)

	// TODO: partial line combining
	// TODO: promRecordDroppedTooLarge
	for {
		select {
		case err := <-errChan:
			return fmt.Errorf("tailing file: %w", err)

		case line, ok := <-lineChan:
			// Channel closed: tail finished cleanly, stop retrying.
			if !ok {
				return nil
			}

			promFileLineSize.WithLabelValues(baseName).Observe(float64(len(line.Bytes)))

			if !catchUpped {
				catchUpped = line.EndOffset >= startSize

				if catchUpped {
					promFileCatchupDone.WithLabelValues(baseName).Set(1)
				}
			}

			// Skip empty lines (still counted in the size histogram above).
			if len(line.Bytes) == 0 {
				continue
			}

			// EndOffset is stored as the resume point: re-tailing from it
			// continues after this line.
			sendQueue <- rawLine{
				recordMetadata: recordMetadata{
					HostInfo: s.hostInfo,
					File:     baseName,

					Offset: line.EndOffset,
				},
				line: line.Bytes,
			}
		}
	}
}

// mongoWithErr pairs a mongo result (anything exposing Err()) with its
// error, enabling the idiomatic `res, err := mongoWithErr(...)` form.
func mongoWithErr[t interface{ Err() error }](mongoWrap t) (t, error) {
	return mongoWrap, mongoWrap.Err()
}

// func JitterUntilCancelWithContext(pctx context.Context, f func(context.Context, context.CancelFunc), period time.Duration, jitterFactor float64, sliding bool) {
// 	ctx, cancel := context.WithCancel(pctx)
// 	wait.JitterUntil(func() { f(ctx, cancel) }, period, jitterFactor, sliding, ctx.Done())
// }