logmower-shipper/pkg/watcher/app.go
2022-11-11 17:09:12 +02:00

157 lines
4.8 KiB
Go

package watcher
import (
"fmt"
"log"
"path/filepath"
"sync"
"git.k-space.ee/k-space/logmower-shipper/pkg/file"
"git.k-space.ee/k-space/logmower-shipper/pkg/globals"
"git.k-space.ee/k-space/logmower-shipper/pkg/lines"
m "git.k-space.ee/k-space/logmower-shipper/pkg/mongo"
"git.k-space.ee/k-space/logmower-shipper/pkg/sender"
"git.k-space.ee/k-space/logmower-shipper/pkg/util"
"github.com/fsnotify/fsnotify"
"github.com/urfave/cli/v2"
mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
)
// App is the logmower-shipper command-line application. It watches a
// kubernetes container-log directory for newly created log files and, for
// every file passing the namespace whitelist and pod-prefix blacklist,
// starts a processor that ships the file's lines to MongoDB.
var App = &cli.App{
	Name:        globals.AppName,
	Version:     "1.0.0",
	Authors:     []*cli.Author{{Name: "jtagcat"}, {Name: "codemowers.io"}},
	Description: "Collect and ship kubernetes logs",
	// TODO: #2: yaml
	Flags: []cli.Flag{
		&cli.BoolFlag{Name: "simulate", Aliases: []string{"dry-run"}, Usage: "Do not write to database"},
		&cli.StringFlag{Name: "log-directory", Usage: "Directory to watch for logs", Value: "/var/log/containers"},
		// Action reads ctx.String("node-name"); previously this flag was never
		// declared, so the lookup always produced "". Declaring it (default
		// still "") is backward-compatible and makes the value configurable.
		&cli.StringFlag{Name: "node-name", EnvVars: []string{"NODE_NAME"}, Usage: "Node name to populate host info with"},
		&cli.IntFlag{Name: "max-record-size", EnvVars: []string{"MAX_RECORD_SIZE"}, Value: 128 * 1024, Usage: "Maximum record size in bytes"},
		&cli.IntFlag{Name: "bulk-insertion-size", EnvVars: []string{"BULK_INSERTION_SIZE"}, Value: 1000, Usage: "MongoDB bulk insertion size in records"},
		&cli.Uint64Flag{Name: "max-connection-pool-size", EnvVars: []string{"MAX_CONNECTION_POOL_SIZE"}, Value: 1, Usage: "Max MongoDB connection pool size"},
		//
		//TODO: &cli.BoolFlag{Name: "normalize-log-level", Usage: "Normalize log.level values to Syslog defined keywords"},
		//TODO: &cli.BoolFlag{Name: "parse-json"},
		//
		&cli.StringSliceFlag{Category: "selectors", Name: "namespace", EnvVars: []string{"NAMESPACE"}, Usage: "whitelist filter for filenames"},
		&cli.StringSliceFlag{Category: "selectors", Name: "exclude-pod-prefixes", EnvVars: []string{"EXCLUDE_POD_PREFIXES"}, Usage: "blacklist filter for filenames", Value: cli.NewStringSlice("logmower-")},
		//
		&cli.StringFlag{Category: "secrets", Name: "mongo-uri", EnvVars: []string{"MONGODB_URI"}, Usage: "mongodb://foo:bar@host:27017/database", Required: true},
	},

	// Before validates flag values and copies them into the package-level
	// configuration of the lines and sender packages.
	Before: func(ctx *cli.Context) error {
		lines.BufferLimitBytes = ctx.Int("max-record-size")
		if lines.BufferLimitBytes < 1 {
			return fmt.Errorf("max-record-size must be positive")
		}

		sender.Simulate = ctx.Bool("simulate")
		sender.MaxBatchItems = ctx.Int("bulk-insertion-size")
		return nil
	},

	// Action connects to MongoDB, starts the fsnotify watcher goroutine,
	// replays create events for pre-existing files, and blocks until the
	// context is cancelled and all file processors have finished.
	Action: func(ctx *cli.Context) error {
		whitelistNamespaces, blacklistPodPrefixes := sliceToMap(ctx.StringSlice("namespace")), ctx.StringSlice("exclude-pod-prefixes")

		var wg sync.WaitGroup
		log.Printf("%s %s starting", ctx.App.Name, ctx.App.Version)

		db, err := m.Initialize(ctx.Context, ctx.String("mongo-uri"), mongoOpt.Client().
			SetMaxPoolSize(ctx.Uint64("max-connection-pool-size")))
		if err != nil {
			return fmt.Errorf("initializing database connection: %w", err)
		}

		hostinfo, err := util.Hostinfo(ctx.String("node-name"))
		if err != nil {
			return fmt.Errorf("populating host info: %w", err)
		}

		watcher, err := fsnotify.NewWatcher()
		if err != nil {
			return fmt.Errorf("initializing log directory watcher: %w", err)
		}
		defer watcher.Close()

		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-ctx.Context.Done():
					return

				case event, ok := <-watcher.Events:
					if !ok {
						return
					}
					promWatcherEvents.Add(1)

					// fsnotify.Op is a bitmask; a Create may arrive combined
					// with other bits, so test the bit rather than comparing
					// the whole Op for equality.
					if event.Op&fsnotify.Create == 0 {
						continue
					}

					// TODO: #1: || if not in filterset
					kubeInfo, ok := util.ParseLogFilename(event.Name)
					if !ok {
						promWatcherFilesSkipped.Add(1)
						log.Printf("skipped %q: filename not parsable in kubernetes log format", filepath.Base(event.Name))
						continue
					}

					// An empty whitelist means "no namespace filter"; the
					// previous unconditional membership test dropped every
					// file when --namespace was not given.
					if len(whitelistNamespaces) > 0 {
						if _, ok := whitelistNamespaces[kubeInfo.Namespace]; !ok {
							continue
						}
					}
					if hasSlicePrefix(kubeInfo.Pod, blacklistPodPrefixes) {
						continue
					}

					promWatcherFilesStarted.Add(1)

					wg.Add(1)
					go func() {
						// defer so a panicking processor can't hang wg.Wait().
						defer wg.Done()

						// Named f to avoid shadowing the imported file package.
						f := file.File{
							File: &m.File{
								Host:     &hostinfo,
								KubeInfo: kubeInfo,
								Path:     event.Name,
							},
							MetricsName: filepath.Base(event.Name),
						}
						f.Process(ctx.Context, db)
					}()

				case err, ok := <-watcher.Errors:
					if !ok {
						return
					}
					promWatcherErr.Add(1)
					// %v, not %e: %e is a floating-point verb and would print
					// "%!e(...)" noise for an error value.
					log.Printf("watching for new logs: %v", err)
				}
			}
		}()

		logDir := ctx.String("log-directory")

		// simulate create events to pick up files already created
		if err := simulateInitialCreates(logDir, watcher.Events); err != nil {
			return fmt.Errorf("listing log directory %q: %w", logDir, err)
		}

		if err := watcher.Add(logDir); err != nil {
			return fmt.Errorf("watching for new logs in %q: %w", logDir, err)
		}

		promWatcherOnline.Set(1)

		// waiting indefinitely for interrupt
		wg.Wait() // wait for watch and file processors to cleanup
		return ctx.Err()
	},
}