rename mongoStruct to mongo_struct

2022-11-09 16:00:44 +02:00
parent 69e0230949
commit 33471fac2d
11 changed files with 32 additions and 13 deletions

logmower/lines.go Normal file

@@ -0,0 +1,103 @@
package logmower
import (
"log"
"sync"
ms "git.k-space.ee/k-space/logmower-shipper/pkg/mongo_struct"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var promRecordDroppedTooLarge = promauto.NewCounterVec(prom.CounterOpts{
Namespace: PrometheusPrefix,
// Subsystem: "record",
Name: "dropped_lines", // "dropped",
Help: "Records dropped due to being too large",
}, []string{"filename"})
type (
RawLines <-chan RawLine
RawLine struct {
*file
Offset int64
B []byte
}
)
// assumes all lines are from same file
func (unparsed RawLines) Process(bufferLimitBytes int, parsed chan<- ms.Record) {
lines := make(chan singleLine)
go unparsed.parse(lines)
var wg sync.WaitGroup
wg.Add(2)
stdOut, stdErr := make(chan singleLine), make(chan singleLine)
go func() {
singleLines(stdOut).process(bufferLimitBytes, parsed)
wg.Done()
}()
go func() {
singleLines(stdErr).process(bufferLimitBytes, parsed)
wg.Done()
}()
// split stdout and stderr
for {
line, ok := <-lines
if !ok {
close(stdOut)
close(stdErr)
wg.Wait()
close(parsed)
return
}
if line.StdErr {
stdErr <- line
} else {
stdOut <- line
}
}
}
func (lines singleLines) process(bufferLimitBytes int, parsed chan<- ms.Record) {
var firstMetadata *ms.ParsedMetadata
var buffer []byte
for {
line, ok := <-lines
if !ok {
// partial line should always be finished with full line
// discard any partial lines without end (full line)
return
}
if len(buffer) == 0 {
firstMetadata = &line.ParsedMetadata
}
buffer = append(buffer, line.B...)
if len(buffer) > bufferLimitBytes {
promRecordDroppedTooLarge.WithLabelValues(line.metricsName).Add(1)
log.Printf("dropped record: size in bytes exceeds limit of %d", bufferLimitBytes)
buffer = nil
continue
}
if !line.partial {
parsed <- ms.Record{
File: line.file.File,
Offset: line.Offset,
String: string(buffer),
ParsedMetadata: *firstMetadata,
}
buffer = nil
}
}
}
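
Process above fans a single stream of parsed lines out to separate stdout and stderr assemblers and joins them with a WaitGroup before closing the output channel. A minimal, self-contained sketch of that fan-out/fan-in pattern, with plain strings standing in for singleLine and ms.Record (splitStreams and the payloads are illustrative, not part of this commit):

package main

import (
	"fmt"
	"strings"
	"sync"
)

// splitStreams is an illustrative stand-in for RawLines.Process: it routes
// each line to one of two workers and closes the shared output only after
// both workers have finished.
func splitStreams(lines <-chan string, out chan<- string) {
	stdOut, stdErr := make(chan string), make(chan string)

	var wg sync.WaitGroup
	wg.Add(2)
	worker := func(in <-chan string) {
		defer wg.Done()
		for l := range in {
			out <- l
		}
	}
	go worker(stdOut)
	go worker(stdErr)

	for l := range lines {
		if strings.HasPrefix(l, "stderr ") {
			stdErr <- l
		} else {
			stdOut <- l
		}
	}
	close(stdOut)
	close(stdErr)
	wg.Wait()
	close(out)
}

func main() {
	in, out := make(chan string), make(chan string)
	go splitStreams(in, out)
	go func() {
		in <- "stdout hello"
		in <- "stderr oops"
		close(in)
	}()
	for l := range out {
		fmt.Println(l)
	}
}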

logmower/lines_single.go Normal file

@@ -0,0 +1,75 @@
package logmower
import (
"bytes"
"fmt"
"log"
"time"
ms "git.k-space.ee/k-space/logmower-shipper/pkg/mongo_struct"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var promRecordPrefixParsingErr = promauto.NewCounterVec(prom.CounterOpts{
Namespace: PrometheusPrefix,
Subsystem: "record",
Name: "parsing_errors",
Help: "Errors while parsing log line prefixes",
}, []string{"filename"})
func (unparsed RawLines) parse(parsed chan<- singleLine) {
for {
raw, ok := <-unparsed
if !ok {
close(parsed)
return
}
line := singleLine{RawLine: raw}
if err := line.parse(); err != nil {
promRecordPrefixParsingErr.WithLabelValues(raw.metricsName).Add(1)
log.Printf("parsing kubernetes log line in %q: %e", raw.File.Path, err)
}
// TODO: should this only be on success?
parsed <- line
}
}
type (
singleLines <-chan singleLine
singleLine struct {
RawLine
// populated by parse()
ms.ParsedMetadata
partial bool // P or F
}
)
func (line *singleLine) parse() (err error) {
split := bytes.SplitN(line.B, []byte(" "), 4)
if len(split) != 4 {
return fmt.Errorf("expected at least 3 spaces in , got %d", len(split)-1)
}
line.TimeKubernetes, err = time.Parse(time.RFC3339Nano, string(split[0]))
if err != nil {
return fmt.Errorf("invalid time: %w", err)
}
line.StdErr = string(split[1]) == "stderr" // or stdout
switch string(split[2]) {
case "P":
line.partial = true
case "F":
default:
return fmt.Errorf("partial indicator must be 'P' or 'F', not %q", split[2])
}
line.B = split[3]
return nil
}
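
parse above splits the Kubernetes container-log prefix "<RFC3339Nano timestamp> <stdout|stderr> <P|F> <content>". A standalone sketch of the same split, with an illustrative parseCRIPrefix helper in place of the singleLine method:

package main

import (
	"bytes"
	"fmt"
	"time"
)

// parseCRIPrefix mirrors the split done by singleLine.parse:
// "<time> <stdout|stderr> <P|F> <content>", space-separated.
func parseCRIPrefix(b []byte) (t time.Time, stderr, partial bool, content []byte, err error) {
	split := bytes.SplitN(b, []byte(" "), 4)
	if len(split) != 4 {
		return t, false, false, nil, fmt.Errorf("expected at least 3 spaces, got %d", len(split)-1)
	}
	if t, err = time.Parse(time.RFC3339Nano, string(split[0])); err != nil {
		return t, false, false, nil, fmt.Errorf("invalid time: %w", err)
	}
	return t, string(split[1]) == "stderr", string(split[2]) == "P", split[3], nil
}

func main() {
	line := []byte("2022-11-09T14:00:44.123456789Z stderr F error: something broke")
	t, isErr, partial, content, err := parseCRIPrefix(line)
	fmt.Println(t.UTC(), isErr, partial, string(content), err)
}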

logmower/mongo.go Normal file

@@ -0,0 +1,91 @@
package logmower
import (
"context"
"fmt"
"log"
"net/url"
"time"
ms "git.k-space.ee/k-space/logmower-shipper/pkg/mongo_struct"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
mongoEvent "go.mongodb.org/mongo-driver/event"
"go.mongodb.org/mongo-driver/mongo"
mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
)
var (
promDbHeartbeat = promauto.NewHistogramVec(prom.HistogramOpts{
Namespace: PrometheusPrefix,
Subsystem: "database",
Name: "heartbeat_time",
Help: "Time in seconds for succeeded heartbeat, or 0 on failure",
Buckets: []float64{0.1, 0.2, 0.5, 1, 5, 10, 50},
}, []string{"connection_id"})
promDbCmd = promauto.NewHistogramVec(prom.HistogramOpts{
Namespace: PrometheusPrefix,
Subsystem: "database",
Name: "operation_latency", // "command_time",
Help: "Time in seconds of commands",
Buckets: []float64{0.1, 0.2, 0.5, 1, 5, 10, 50},
}, []string{"connection_id", "command_name"})
promDbCmdErr = promauto.NewCounterVec(prom.CounterOpts{
Namespace: PrometheusPrefix,
Subsystem: "database",
Name: "errors",
Help: "Failed commands (also reflected elsewhere)",
}, []string{"connection_id", "command_name"})
)
func mongoMonitoredClientOptions() *mongoOpt.ClientOptions {
return mongoOpt.Client().
SetServerMonitor(&mongoEvent.ServerMonitor{
ServerHeartbeatSucceeded: func(ev *mongoEvent.ServerHeartbeatSucceededEvent) {
promDbHeartbeat.WithLabelValues(ev.ConnectionID).Observe(time.Duration(ev.DurationNanos).Seconds())
},
ServerHeartbeatFailed: func(ev *mongoEvent.ServerHeartbeatFailedEvent) {
promDbHeartbeat.WithLabelValues(ev.ConnectionID).Observe(0)
log.Printf("database heartbeat failed on connection %q: %e", ev.ConnectionID, ev.Failure)
},
}).
SetMonitor(&mongoEvent.CommandMonitor{
Succeeded: func(_ context.Context, ev *mongoEvent.CommandSucceededEvent) {
promDbCmd.WithLabelValues(ev.ConnectionID, ev.CommandName).Observe(time.Duration(ev.DurationNanos).Seconds())
},
Failed: func(_ context.Context, ev *mongoEvent.CommandFailedEvent) {
promDbCmd.WithLabelValues(ev.ConnectionID, ev.CommandName).Observe(time.Duration(ev.DurationNanos).Seconds())
promDbCmdErr.WithLabelValues(ev.ConnectionID, ev.CommandName).Add(1)
},
})
}
func initDatabase(ctx context.Context, uri string) (*mongo.Collection, error) {
uriParsed, err := url.ParseRequestURI(uri)
if err != nil {
return nil, fmt.Errorf("parsing URI for database name: %w", err)
}
if len(uriParsed.Path) < 2 {
return nil, fmt.Errorf("URI must include database name (as database to authenticate against)")
}
uriParsed.Path = uriParsed.Path[1:] // remove leading slash
dbOpt := mongoMonitoredClientOptions().ApplyURI(uri)
dbClient, err := mongo.Connect(mongoTimeoutCtx(ctx), dbOpt)
if err != nil {
return nil, fmt.Errorf("connecting to %q: %w", dbOpt.GetURI(), err)
}
col := dbClient.Database(uriParsed.Path).Collection("logs")
if err := ms.InitializeIndexes(mongoTimeoutCtx(ctx), col); err != nil {
return nil, fmt.Errorf("initializing indexes: %w", err)
}
return col, nil
}
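
initDatabase above derives the database name from the URI path and authenticates against it. A standalone sketch of just that URI handling (databaseFromURI is an illustrative helper, not part of this commit):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// databaseFromURI extracts the database name from a mongodb:// URI:
// the path component, minus the leading slash, names the database.
func databaseFromURI(uri string) (string, error) {
	u, err := url.ParseRequestURI(uri)
	if err != nil {
		return "", fmt.Errorf("parsing URI: %w", err)
	}
	db := strings.TrimPrefix(u.Path, "/")
	if db == "" {
		return "", fmt.Errorf("URI must include database name")
	}
	return db, nil
}

func main() {
	db, err := databaseFromURI("mongodb://foo:bar@host:27017/logmower")
	fmt.Println(db, err) // logmower <nil>
}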

logmower/sender.go Normal file

@@ -0,0 +1,108 @@
package logmower
import (
"context"
"log"
"time"
ms "git.k-space.ee/k-space/logmower-shipper/pkg/mongo_struct"
"github.com/jtagcat/util"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.mongodb.org/mongo-driver/mongo"
)
var (
promShipperQueued = promauto.NewGaugeVec(prom.GaugeOpts{
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "shipper_record", // "queued",
Help: "Log records in queue to be batched and sent to database",
}, []string{"filename"})
promShipperDbSent = promauto.NewCounterVec(prom.CounterOpts{
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "record", // "sent",
Help: "Log records successfully committed to database",
}, []string{"filename"})
promShipperBatchSizeResult = promauto.NewHistogram(prom.HistogramOpts{
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "bulk_submission_message", // "items_in_batch"
Help: "Batch size for database submissions",
Buckets: []float64{1, 5, 10, 50, 100, 500, 1000, 5000, 10000},
})
promShipperDbSendError = promauto.NewCounterVec(prom.CounterOpts{
Namespace: PrometheusPrefix,
// Subsystem: "shipper",
Name: "insertion_error", // "errors",
Help: "Errors while submitting to database", // TODO:
}, []string{"filename"})
promShipperSynced = promauto.NewGaugeVec(prom.GaugeOpts{
Namespace: PrometheusPrefix,
Subsystem: "shipper",
Name: "batches_synced",
Help: "All batches available have been committed database (0 or 1)",
}, []string{"filename"})
)
const (
MaxBatchItems = 10000
MaxBatchTime = 5 * time.Second
)
type queueT <-chan ms.Record
func (queue queueT) sender(db *mongo.Collection, metricsFilename string) {
batched := make(chan []ms.Record)
// batcher and queue metrics
go func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
for {
promShipperQueued.WithLabelValues(metricsFilename).Set(float64(
len(queue)))
timer := time.NewTimer(time.Second)
select {
case <-ctx.Done():
return
case <-timer.C:
}
}
}()
util.Batch(MaxBatchItems, MaxBatchTime, queue, batched)
// returns when sendQueue is closed
}()
for {
promShipperSynced.WithLabelValues(metricsFilename).Set(1)
batch, ok := <-batched
if !ok {
return
}
promShipperBatchSizeResult.Observe(float64(len(batch)))
promShipperSynced.WithLabelValues(metricsFilename).Set(0)
var batchBson []interface{} // mongo does not like typing
for _, b := range batch {
batchBson = append(batchBson, b.ToBson())
}
result, err := db.InsertMany(mongoTimeoutCtx(context.Background()), batchBson, nil)
if err != nil {
promShipperDbSendError.WithLabelValues(metricsFilename).Add(1)
log.Printf("failure in batch submit to database: %e", err) // TODO: add some selective retry here or something, better error handling
continue
}
promShipperDbSent.WithLabelValues(metricsFilename).Add(float64(
len(result.InsertedIDs)))
}
}
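
sender relies on util.Batch to turn the record queue into bounded batches; that helper is not part of this diff, but the count-or-timeout batching it needs looks roughly like the sketch below (batch and the int payloads are illustrative only):

package main

import (
	"fmt"
	"time"
)

// batch collects items from in into slices of at most maxItems, flushing
// early when maxWait elapses; an illustrative stand-in for util.Batch.
func batch(maxItems int, maxWait time.Duration, in <-chan int, out chan<- []int) {
	defer close(out)
	for {
		first, ok := <-in
		if !ok {
			return
		}
		buf := []int{first}
		timer := time.NewTimer(maxWait)
	collect:
		for len(buf) < maxItems {
			select {
			case v, ok := <-in:
				if !ok {
					break collect
				}
				buf = append(buf, v)
			case <-timer.C:
				break collect
			}
		}
		timer.Stop()
		out <- buf
	}
}

func main() {
	in, out := make(chan int), make(chan []int)
	go batch(3, 100*time.Millisecond, in, out)
	go func() {
		for i := 1; i <= 7; i++ {
			in <- i
		}
		close(in)
	}()
	for b := range out {
		fmt.Println(b) // [1 2 3] [4 5 6] [7]
	}
}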

logmower/submitter.go Normal file

@@ -0,0 +1,168 @@
package logmower
import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"time"
ms "git.k-space.ee/k-space/logmower-shipper/pkg/mongo_struct"
"github.com/jtagcat/util"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
"k8s.io/apimachinery/pkg/util/wait"
)
var (
promFileInitialSeekSkipped = promauto.NewGaugeVec(prom.GaugeOpts{
Namespace: PrometheusPrefix,
// Subsystem: "file",
Name: "skipped_bytes",
Help: "Bytes skipped in file after discovering",
}, []string{"filename"})
promFileCatchupDone = promauto.NewGaugeVec(prom.GaugeOpts{
Namespace: PrometheusPrefix,
Subsystem: "file",
Name: "catchupped",
Help: "(0 or) 1 if initial backlog has been sent; (total <= watcher_file_count)",
}, []string{"filename"}) // TODO: rm filename?
promFileErr = promauto.NewCounterVec(prom.CounterOpts{
Namespace: PrometheusPrefix,
Subsystem: "file",
Name: "errors_count",
Help: "Errors while reading file",
}, []string{"filename"})
promFileLineSize = promauto.NewHistogramVec(prom.HistogramOpts{
Namespace: PrometheusPrefix,
// Subsystem: "file",
Name: "line_size_bytes",
Help: "Log line size in bytes",
Buckets: []float64{80, 160, 320, 640, 1280},
}, []string{"filename"})
)
const SendQueueLimit = 1024
type file struct {
ms.File
metricsName string // filepath.Base()
}
// TODO: caller could call duplicate shipFile of same name on file replace: sends might not work properly
func (f file) Process(ctx context.Context, db *mongo.Collection, recordLimitBytes int) {
lineChan := make(chan RawLine)
defer close(lineChan)
dbQueue := make(chan ms.Record, SendQueueLimit)
go RawLines(lineChan).Process(recordLimitBytes, dbQueue)
waitGo := util.GoWg(func() {
queueT(dbQueue).sender(db, f.metricsName)
})
defer waitGo()
// TODO: better way to kill or wait for sendQueue before retrying (or duplicates?)
_ = wait.ManagedExponentialBackoffWithContext(ctx, defaultBackoff(), func() (done bool, _ error) {
err := f.trySubmit(ctx, db, lineChan)
if err == nil {
return true, nil
}
promFileErr.WithLabelValues(f.metricsName).Add(1)
log.Printf("processing file %q: %e", f.metricsName, err)
// nil: loop and keep retrying indefinitely
return false, nil
})
}
// used by Process(), don't call directly
func (f file) trySubmit(ctx context.Context, db *mongo.Collection, sendQueue chan<- RawLine) error {
// TODO: better way for respecting ?killing sender for retry
for {
if len(sendQueue) == 0 {
break
}
time.Sleep(time.Second)
}
// get files with offset
offsetResult, _ := mongoWithErr(db.FindOne(mongoTimeoutCtx(ctx),
bson.D{{Key: ms.RecordKeyHostId, Value: f.Host.Id}, {Key: ms.RecordKeyFilePath, Value: f.Path}},
&mongoOpt.FindOneOptions{Sort: bson.D{{Key: ms.RecordKeyOffset, Value: -1}}}, // sort descending (get largest)
))
offsetResultBytes, err := offsetResult.DecodeBytes()
if err != nil && !errors.Is(err, mongo.ErrNoDocuments) {
return fmt.Errorf("retrieving offset from database: %w", err)
}
dbOffset := ms.RecordOffsetFromBson(&offsetResultBytes)
fi, err := os.Stat(f.Path)
if err != nil {
return fmt.Errorf("getting original file size: %w", err)
}
startSize := fi.Size()
sctx, cancel := context.WithCancel(ctx)
defer cancel()
promFileInitialSeekSkipped.WithLabelValues(f.metricsName).Set(float64(dbOffset))
lineChan, errChan, err := util.TailFile(sctx, f.Path, dbOffset, io.SeekStart)
if err != nil {
return fmt.Errorf("tailing file: %w", err)
}
var catchUpped bool
promFileCatchupDone.WithLabelValues(f.metricsName).Set(0)
for {
select {
case err := <-errChan:
return fmt.Errorf("tailing file: %w", err)
case line, ok := <-lineChan:
if !ok {
return nil
}
promFileLineSize.WithLabelValues(f.metricsName).Observe(float64(len(line.Bytes)))
if !catchUpped {
catchUpped = line.EndOffset >= startSize
if catchUpped {
promFileCatchupDone.WithLabelValues(f.metricsName).Set(1)
}
}
if len(line.Bytes) == 0 {
continue
}
sendQueue <- RawLine{
file: &f,
Offset: line.EndOffset,
B: line.Bytes,
}
}
}
}
func mongoWithErr[t interface{ Err() error }](mongoWrap t) (t, error) {
return mongoWrap, mongoWrap.Err()
}
// func JitterUntilCancelWithContext(pctx context.Context, f func(context.Context, context.CancelFunc), period time.Duration, jitterFactor float64, sliding bool) {
// ctx, cancel := context.WithCancel(pctx)
// wait.JitterUntil(func() { f(ctx, cancel) }, period, jitterFactor, sliding, ctx.Done())
// }
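
Process retries trySubmit indefinitely through an exponential backoff (2s initial, factor 1.5, capped at 30s, per defaultBackoff in watcher.go). A dependency-free sketch of that retry loop, with jitter omitted and retryForever as an illustrative name:

package main

import (
	"context"
	"fmt"
	"time"
)

// retryForever keeps calling try with growing, capped delays until it
// succeeds or ctx is cancelled; an illustrative equivalent of the
// backoff loop in file.Process.
func retryForever(ctx context.Context, initial, maxDelay time.Duration, try func() error) error {
	delay := initial
	for {
		if err := try(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
		if delay = time.Duration(float64(delay) * 1.5); delay > maxDelay {
			delay = maxDelay
		}
	}
}

func main() {
	attempts := 0
	_ = retryForever(context.Background(), 10*time.Millisecond, 50*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("attempt %d failed", attempts)
		}
		return nil
	})
	fmt.Println("succeeded after", attempts, "attempts")
}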

logmower/watcher.go Normal file

@@ -0,0 +1,208 @@
package logmower
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
ms "git.k-space.ee/k-space/logmower-shipper/pkg/mongo_struct"
"github.com/fsnotify/fsnotify"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/urfave/cli/v2"
"k8s.io/apimachinery/pkg/util/wait"
)
const DatabaseCommandTimeout = 10 * time.Second
const PrometheusPrefix = "logmower" // TODO:
// wrapper to force copying before use
func defaultBackoff() wait.Backoff {
return wait.Backoff{
Duration: 2 * time.Second,
Factor: 1.5,
Jitter: 0.1,
Cap: 30 * time.Second,
}
}
func mongoTimeoutCtx(ctx context.Context) context.Context {
ctx, _ = context.WithTimeout(ctx, DatabaseCommandTimeout) //nolint:lostcancel (cancelled by mongo, should be bug on them //TODO)
return ctx
}
var App = &cli.App{
Name: "logmower-shipper",
Version: "1.0.0",
Authors: []*cli.Author{{Name: "jtagcat"}},
Description: "Collect and ship kubernetes logs",
// Usage: "rubykana <input>",
// TODO: #2: yaml
Flags: []cli.Flag{
&cli.BoolFlag{Name: "dry-run", Usage: "Do not write to database"}, // TODO:
&cli.StringFlag{Name: "log-directory", Usage: "Directory to watch for logs", Value: "/var/log/containers"},
&cli.IntFlag{Name: "max-record-size", Value: 128 * 1024, Usage: "Maximum record size in bytes"}, // TODO:
&cli.BoolFlag{Name: "normalize-log-level", Usage: "Normalize log.level values to Syslog defined keywords"}, // TODO:
// &cli.BoolFlag{Name: "parse-json"}, //TODO:
&cli.StringFlag{Category: "k8s metadata", Name: "pod-namespace", EnvVars: []string{"KUBE_POD_NAMESPACE"}}, // TODO:
&cli.StringFlag{Category: "k8s metadata", Name: "node-name", EnvVars: []string{"KUBE_NODE_NAME"}, Required: true},
&cli.StringFlag{Category: "secrets", Name: "mongo-uri", EnvVars: []string{"MONGO_URI"}, Usage: "mongodb://foo:bar@host:27017/database", Required: true},
},
Before: func(ctx *cli.Context) error {
if ctx.Int("max-record-size") < 1 {
return fmt.Errorf("max-record-size must be postivie")
}
return nil
},
Action: func(ctx *cli.Context) error {
var (
promWatcherOnline = promauto.NewGauge(prom.GaugeOpts{
Namespace: PrometheusPrefix,
Subsystem: "watcher",
Name: "online",
Help: "1 if initialized, and directory watcher has been engaged successfully",
})
promWatcherErr = promauto.NewCounter(prom.CounterOpts{
Namespace: PrometheusPrefix,
Subsystem: "watcher",
Name: "errors",
Help: "Error in logmower watching log files",
})
promWatcherFilesStarted = promauto.NewCounter(prom.CounterOpts{
Namespace: PrometheusPrefix,
// Subsystem: "watcher",
Name: "log_file", // "discovered_logfiles",
Help: "Number of tracked log files",
})
promWatcherFilesSkipped = promauto.NewCounter(prom.CounterOpts{
Namespace: PrometheusPrefix,
// Subsystem: "watcher",
Name: "invalid_filename", // "skipped_files",
Help: "Number of files in log directory skipped due to unexpected filename",
})
promWatcherEvents = promauto.NewCounter(prom.CounterOpts{
Namespace: PrometheusPrefix,
// Subsystem: "watcher",
Name: "inotify_event", // "events",
Help: "Number of events while watchng (includes initial create events for existing file discovery)",
})
)
ctx.Context, _ = signal.NotifyContext(ctx.Context, os.Interrupt) // TODO: test
var wg sync.WaitGroup
log.Printf("%s %s starting", ctx.App.Name, ctx.App.Version)
db, err := initDatabase(ctx.Context, ctx.String("mongo-uri"))
if err != nil {
return fmt.Errorf("initializing database connection: %w", err)
}
var hostInfo ms.HostInfo
if err := hostInfo.Populate(ctx.String("node-name")); err != nil {
return fmt.Errorf("populating host info: %w", err)
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return fmt.Errorf("initializing log directory watcher: %w", err)
}
defer watcher.Close()
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-ctx.Context.Done():
return
case event, ok := <-watcher.Events:
if !ok {
return
}
promWatcherEvents.Add(1)
if event.Op != fsnotify.Create {
continue
}
// TODO: #1: || if not in filterset
kubeInfo, ok := ms.ParseLogName(event.Name)
if !ok {
promWatcherFilesSkipped.Add(1)
log.Printf("skipped %q: filename not parsable in kubernetes log format", filepath.Base(event.Name))
continue
}
promWatcherFilesStarted.Add(1)
wg.Add(1)
go func() {
file := file{
File: ms.File{
Host: &hostInfo,
KubeInfo: kubeInfo,
Path: event.Name,
},
metricsName: filepath.Base(event.Name),
}
file.Process(ctx.Context, db, ctx.Int("max-record-size"))
wg.Done()
}()
case err, ok := <-watcher.Errors:
if !ok {
return
}
promWatcherErr.Add(1)
log.Printf("watching for new logs: %e", err)
}
}
}()
logDir := ctx.String("log-directory")
// simulate create events to pick up files already created
if err := simulateInitialCreates(logDir, watcher.Events); err != nil {
return fmt.Errorf("listing log directory %q: %w", logDir, err)
}
if err := watcher.Add(logDir); err != nil {
return fmt.Errorf("watching for new logs in %q: %w", logDir, err)
}
promWatcherOnline.Set(1)
// waiting indefinitely for interrupt
wg.Wait() // wait for watch and file processors to cleanup
return ctx.Err()
},
}
func simulateInitialCreates(dirName string, eventChan chan<- fsnotify.Event) error {
dir, err := os.ReadDir(dirName)
if err != nil {
return err
}
for _, file := range dir {
eventChan <- fsnotify.Event{
Name: filepath.Join(dirName, file.Name()),
Op: fsnotify.Create,
}
}
return nil
}
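
The watcher above registers an fsnotify watch on the log directory and, before that, injects synthetic Create events for files that already exist (simulateInitialCreates). A standalone sketch of that watch loop, logging filenames instead of shipping them; the directory path is only the --log-directory default:

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	dir := "/var/log/containers" // same default as the --log-directory flag

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Pick up files that already exist, mirroring simulateInitialCreates above.
	go func() {
		entries, err := os.ReadDir(dir)
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range entries {
			watcher.Events <- fsnotify.Event{Name: filepath.Join(dir, e.Name()), Op: fsnotify.Create}
		}
	}()

	if err := watcher.Add(dir); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			if event.Op&fsnotify.Create == 0 {
				continue
			}
			log.Printf("new log file: %s", filepath.Base(event.Name))
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		}
	}
}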