package file

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"time"

	"git.k-space.ee/k-space/logmower-shipper/pkg/globals"
	"git.k-space.ee/k-space/logmower-shipper/pkg/lines"
	m "git.k-space.ee/k-space/logmower-shipper/pkg/mongo"
	"git.k-space.ee/k-space/logmower-shipper/pkg/sender"
	"github.com/jtagcat/util"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
	"k8s.io/apimachinery/pkg/util/wait"
)

const SendQueueLimit = 1024

type File struct {
	*m.File
	MetricsName string // filepath.Base()
}

// Process tails the file and ships its lines to the database, retrying
// indefinitely on error. Pipeline: raw lines -> lines.RawC parser ->
// dbQueue -> sender.
//
// TODO: on file replacement, the caller could end up running Process twice
// for the same name; sends might not work properly in that case.
func (f File) Process(ctx context.Context, db *mongo.Collection, recordLimitBytes int) {
	lineChan := make(chan lines.Raw)
	defer close(lineChan)

	dbQueue := make(chan m.Record, SendQueueLimit)
	go lines.RawC(lineChan).Process(recordLimitBytes, dbQueue)

	waitGo := util.GoWg(func() {
		sender.Queue(dbQueue).Sender(db, f.MetricsName)
	})
	defer waitGo()

	// TODO: better way to kill or wait for the send queue before retrying (or risk duplicates?)
	_ = wait.ExponentialBackoffWithContext(ctx, globals.Backoff(), func() (done bool, _ error) {
		err := f.trySubmit(ctx, db, lineChan)
		if err == nil {
			return true, nil
		}

		promFileErr.WithLabelValues(f.MetricsName).Add(1)
		log.Printf("processing file %q: %v", f.MetricsName, err)

		// nil: loop and keep retrying indefinitely
		return false, nil
	})
}

// trySubmit is called by Process within its retry loop; don't use it directly.
func (f File) trySubmit(ctx context.Context, db *mongo.Collection, sendQueue chan<- lines.Raw) error {
	lFile := lines.File(f) // file.File, but avoiding an import cycle

	// TODO: better way of respecting (killing?) the sender before retrying.
	// Wait for the send queue to drain before re-reading the file.
	for len(sendQueue) > 0 {
		time.Sleep(time.Second)
	}

	// Resume from the largest offset already recorded in the database.
	offsetResult, _ := mongoWithErr(db.FindOne(globals.MongoTimeout(ctx),
		bson.D{{Key: m.RecordKeyHostId, Value: f.Host.Id}, {Key: m.RecordKeyFilePath, Value: f.Path}},
		&mongoOpt.FindOneOptions{Sort: bson.D{{Key: m.RecordKeyOffset, Value: -1}}}, // sort descending (get largest)
	))

	offsetResultBytes, err := offsetResult.DecodeBytes()
	if err != nil && !errors.Is(err, mongo.ErrNoDocuments) {
		return fmt.Errorf("retrieving offset from database: %w", err)
	}
	dbOffset := m.RecordOffsetFromBson(&offsetResultBytes)

	fi, err := os.Stat(f.Path)
	if err != nil {
		return fmt.Errorf("getting original file size: %w", err)
	}
	startSize := fi.Size()

	sctx, cancel := context.WithCancel(ctx)
	defer cancel()

	promFileInitialSeekSkipped.WithLabelValues(f.MetricsName).Set(float64(dbOffset))

	lineChan, errChan, err := util.TailFile(sctx, f.Path, dbOffset, io.SeekStart)
	if err != nil {
		return fmt.Errorf("tailing file: %w", err)
	}

	// catchUpped flips once tailing passes the file size observed at startup;
	// it is exported as a gauge so catch-up progress is visible in metrics.
	var catchUpped bool
	promFileCatchupDone.WithLabelValues(f.MetricsName).Set(0)

	for {
		select {
		case err := <-errChan:
			return fmt.Errorf("tailing file: %w", err)

		case line, ok := <-lineChan:
			if !ok {
				return nil
			}

			promFileLineSize.WithLabelValues(f.MetricsName).Observe(float64(len(line.Bytes)))

			if !catchUpped {
				catchUpped = line.EndOffset >= startSize
				if catchUpped {
					promFileCatchupDone.WithLabelValues(f.MetricsName).Set(1)
				}
			}

			if len(line.Bytes) == 0 {
				continue
			}

			sendQueue <- lines.Raw{
				File:   &lFile,
				Offset: line.EndOffset,
				B:      line.Bytes,
			}
		}
	}
}

func mongoWithErr[t interface{ Err() error }](mongoWrap t) (t, error) {
	return mongoWrap, mongoWrap.Err()
}
// func JitterUntilCancelWithContext(pctx context.Context, f func(context.Context, context.CancelFunc), period time.Duration, jitterFactor float64, sliding bool) {
// 	ctx, cancel := context.WithCancel(pctx)
// 	wait.JitterUntil(func() { f(ctx, cancel) }, period, jitterFactor, sliding, ctx.Done())
// }
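// Usage sketch (an illustration, not one of this package's real call sites):
// a watcher that has discovered a log file would construct a File and run
// Process until the context is cancelled. The m.File field values and the
// 8 MiB record limit below are assumed for the example; only Host and Path
// are fields this package actually reads.
//
//	f := File{
//		File:        &m.File{ /* Host, Path as discovered by the watcher */ },
//		MetricsName: filepath.Base("/var/log/containers/app.log"),
//	}
//	go f.Process(ctx, collection, 8*1024*1024)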