work on flags
commit 0b3d382742 (parent 2ded62f641)
@@ -7,8 +7,8 @@ import (
     "io"
     "log"
     "os"
+    "time"

-    "git.k-space.ee/k-space/logmower-shipper/pkg/globals"
     "git.k-space.ee/k-space/logmower-shipper/pkg/lines"
     m "git.k-space.ee/k-space/logmower-shipper/pkg/mongo"
     "git.k-space.ee/k-space/logmower-shipper/pkg/sender"
@@ -21,6 +21,16 @@ import (

 const SendQueueLimit = 1024

+// wrapper to force copying before use
+func backoff() wait.Backoff {
+    return wait.Backoff{
+        Duration: 2 * time.Second,
+        Factor:   1.5,
+        Jitter:   0.1,
+        Cap:      30 * time.Second,
+    }
+}
+
 type File struct {
     *m.File
     MetricsName string // filepath.Base()
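Note: backoff() duplicates the helper being removed from the globals package further down, but scoped to this package. The comment "wrapper to force copying before use" is the point: wait.Backoff carries mutable retry state, so handing each caller a fresh value avoids sharing that state between retry loops. A minimal, self-contained sketch of the value-copy behaviour (illustrative only, not code from the repo):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // same shape as the helper added in this hunk
    func backoff() wait.Backoff {
        return wait.Backoff{Duration: 2 * time.Second, Factor: 1.5, Jitter: 0.1, Cap: 30 * time.Second}
    }

    func main() {
        a, b := backoff(), backoff()
        a.Duration = time.Minute            // mutating one copy...
        fmt.Println(a.Duration, b.Duration) // ...leaves the other at 2s
    }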
@@ -28,7 +38,7 @@ type File struct {

 // TODO: caller could call duplicate shipFile of same name on file replace: sends might not work properly
 func (f File) Process(ctx context.Context, db *mongo.Collection) {
-    _ = wait.ManagedExponentialBackoffWithContext(ctx, globals.Backoff(), func() (done bool, _ error) {
+    _ = wait.ManagedExponentialBackoffWithContext(ctx, backoff(), func() (done bool, _ error) {
         err := f.process(ctx, db)
         if err == nil {
             return true, nil
@@ -74,7 +84,7 @@ func (f File) process(ctx context.Context, db *mongo.Collection) error {
     defer dfn()

     // get files with offset
-    offsetResult, _ := mongoWithErr(db.FindOne(globals.MongoTimeout(sctx),
+    offsetResult, _ := mongoWithErr(db.FindOne(m.GlobalTimeout(sctx),
         bson.D{{Key: m.RecordKeyHostId, Value: f.Host.Id}, {Key: m.RecordKeyFilePath, Value: f.Path}},
         &mongoOpt.FindOneOptions{Sort: bson.D{{Key: m.RecordKeyOffset, Value: -1}}}, // sort descending (get largest)
     ))
@@ -1,36 +1,8 @@
 package globals

-import (
-    "context"
-    "time"
-
-    "k8s.io/apimachinery/pkg/util/wait"
-)
-
 // Did not find any better way for multipackage prefixing

 const (
     PrometheusPrefix = "logmower"
     AppName          = PrometheusPrefix + "shipper"
-    DatabaseCommandTimeout = 10 * time.Second
 )
-
-var (
-    BufferLimitBytes int
-    Simulate         bool
-)
-
-func MongoTimeout(ctx context.Context) context.Context {
-    ctx, _ = context.WithTimeout(ctx, DatabaseCommandTimeout) //nolint:lostcancel (cancelled by mongo, should be bug on them //TODO)
-    return ctx
-}
-
-// wrapper to force copying before use
-func Backoff() wait.Backoff {
-    return wait.Backoff{
-        Duration: 2 * time.Second,
-        Factor:   1.5,
-        Jitter:   0.1,
-        Cap:      30 * time.Second,
-    }
-}
@@ -5,10 +5,11 @@ import (
     "log"
     "sync"

-    "git.k-space.ee/k-space/logmower-shipper/pkg/globals"
     m "git.k-space.ee/k-space/logmower-shipper/pkg/mongo"
 )

+var BufferLimitBytes int
+
 type (
     RawC <-chan Raw
     Raw  struct {
@@ -87,9 +88,9 @@ func (lines singleLines) process(ctx context.Context, parsed chan<- m.Record) {

         buffer = append(buffer, line.B...)

-        if len(buffer) > globals.BufferLimitBytes {
+        if len(buffer) > BufferLimitBytes && BufferLimitBytes != 0 {
             promRecordDroppedTooLarge.WithLabelValues(line.MetricsName).Add(1)
-            log.Printf("dropped record: size in bytes exceeds limit of %d", globals.BufferLimitBytes)
+            log.Printf("dropped record: size in bytes exceeds limit of %d", BufferLimitBytes)

             buffer = nil
             continue
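Note: besides dropping the globals dependency, the condition gains a BufferLimitBytes != 0 guard, so the zero value of the new package variable means "no limit" rather than "drop every record". The CLI Before hook (last hunk) still rejects values below 1, so this mainly matters for code that uses pkg/lines without going through the CLI. A tiny illustrative check of the guard (not repo code):

    package main

    import "fmt"

    func main() {
        limit, recordLen := 0, 1<<20 // limit stands in for an unset lines.BufferLimitBytes
        drop := recordLen > limit && limit != 0
        fmt.Println(drop) // false: a zero limit disables dropping
    }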
@@ -20,9 +20,8 @@ var promDbHeartbeat = promauto.NewHistogramVec(prom.HistogramOpts{
     Buckets: []float64{0.1, 0.2, 0.5, 1, 5, 10, 50},
 }, []string{"connection_id"})

-func monitoredClientOptions() *mongoOpt.ClientOptions {
-    return mongoOpt.Client().
-        SetServerMonitor(&mongoEvent.ServerMonitor{
+func attachMetrics(opts *mongoOpt.ClientOptions) *mongoOpt.ClientOptions {
+    opts.SetServerMonitor(&mongoEvent.ServerMonitor{
         ServerHeartbeatSucceeded: func(ev *mongoEvent.ServerHeartbeatSucceededEvent) {
             promDbHeartbeat.WithLabelValues(ev.ConnectionID).Observe(time.Duration(ev.DurationNanos).Seconds())
         },
@@ -31,4 +30,5 @@ func monitoredClientOptions() *mongoOpt.ClientOptions {
             log.Printf("database heartbeat failed on connection %q: %e", ev.ConnectionID, ev.Failure)
         },
     })
+    return opts
 }
@@ -4,12 +4,20 @@ import (
     "context"
     "fmt"
     "net/url"
+    "time"

-    "git.k-space.ee/k-space/logmower-shipper/pkg/globals"
     "go.mongodb.org/mongo-driver/mongo"
+    mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
 )

-func Initialize(ctx context.Context, uri string) (*mongo.Collection, error) {
+const CommandTimeout = 10 * time.Second
+
+func GlobalTimeout(ctx context.Context) context.Context {
+    ctx, _ = context.WithTimeout(ctx, CommandTimeout) //nolint:lostcancel (cancelled by mongo, should be bug on them //TODO)
+    return ctx
+}
+
+func Initialize(ctx context.Context, uri string, opts *mongoOpt.ClientOptions) (*mongo.Collection, error) {
     uriParsed, err := url.ParseRequestURI(uri)
     if err != nil {
         return nil, fmt.Errorf("parsing URI for database name: %w", err)
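Note: the 10-second per-command timeout moves out of globals and into the mongo package as CommandTimeout/GlobalTimeout. Each call site derives a fresh deadline-bound child context, so parent cancellation still propagates while any single driver call is capped; the discarded CancelFunc is the //nolint:lostcancel TODO carried over from the old MongoTimeout. A minimal sketch of what the wrapper yields (illustrative, mirrors the code above):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    const commandTimeout = 10 * time.Second // mirrors CommandTimeout above

    func globalTimeout(ctx context.Context) context.Context {
        ctx, _ = context.WithTimeout(ctx, commandTimeout) //nolint:lostcancel (same caveat as the code above)
        return ctx
    }

    func main() {
        ctx := globalTimeout(context.Background())
        deadline, ok := ctx.Deadline()
        fmt.Println(ok, time.Until(deadline).Round(time.Second)) // true 10s
    }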
@@ -20,20 +28,20 @@ func Initialize(ctx context.Context, uri string) (*mongo.Collection, error) {
         return nil, fmt.Errorf("URI must include database name (as database to authenticate against)")
     }

-    dbOpt := monitoredClientOptions().ApplyURI(uri)
+    dbOpt := attachMetrics(opts).ApplyURI(uri)

-    dbClient, err := mongo.Connect(globals.MongoTimeout(ctx), dbOpt)
+    dbClient, err := mongo.Connect(GlobalTimeout(ctx), dbOpt)
     if err != nil {
         return nil, fmt.Errorf("connecting to %q: %w", dbOpt.GetURI(), err)
     }

-    if err := dbClient.Ping(globals.MongoTimeout(ctx), nil); err != nil {
+    if err := dbClient.Ping(GlobalTimeout(ctx), nil); err != nil {
         return nil, fmt.Errorf("first ping to database: %w", err)
     }

     col := dbClient.Database(uriParsed.Path).Collection("logs")

-    if err := InitializeIndexes(globals.MongoTimeout(ctx), col); err != nil {
+    if err := InitializeIndexes(GlobalTimeout(ctx), col); err != nil {
         return nil, fmt.Errorf("initializing indexes: %w", err)
     }

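Note: Initialize now receives the base client options from the caller (and attachMetrics, above, decorates them), so connection tuning such as the pool size lives next to the CLI flags instead of inside this package. The last hunk shows the real call site; a minimal sketch of the new signature in use, assuming only the mongo-driver API:

    package main

    import (
        "context"
        "log"

        m "git.k-space.ee/k-space/logmower-shipper/pkg/mongo"
        mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
    )

    func main() {
        // pool size chosen per deployment; 1 is the new CLI default
        opts := mongoOpt.Client().SetMaxPoolSize(1)

        col, err := m.Initialize(context.Background(), "mongodb://foo:bar@host:27017/database", opts)
        if err != nil {
            log.Fatal(err)
        }
        _ = col // the "logs" collection, ready for the sender
    }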
@@ -6,15 +6,18 @@ import (
     "log"
     "time"

-    "git.k-space.ee/k-space/logmower-shipper/pkg/globals"
     m "git.k-space.ee/k-space/logmower-shipper/pkg/mongo"
     "github.com/jtagcat/util"
     "go.mongodb.org/mongo-driver/mongo"
     "go.mongodb.org/mongo-driver/mongo/options"
 )

+var (
+    Simulate      = false
+    MaxBatchItems = 1000
+)
+
 const (
-    MaxBatchItems = 10000
     MaxBatchTime = 5 * time.Second
 )

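Note: MaxBatchItems changes from a compile-time constant of 10000 to a package variable defaulting to 1000, and Simulate moves here from globals; both are assigned from the CLI Before hook in the last hunk. Embedding code or tests can also set them directly, for example (a hedged sketch using the import path from this diff):

    package main

    import "git.k-space.ee/k-space/logmower-shipper/pkg/sender"

    func main() {
        sender.Simulate = true     // skip real database writes
        sender.MaxBatchItems = 200 // smaller bulk inserts, e.g. in tests
        // ... start the shipper pipeline as usual
    }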
@@ -86,14 +89,14 @@ func (queue Queue) Sender(db *mongo.Collection, metricsFilename string, cancelOn
 }

 func insertManyWithSimulate(db *mongo.Collection, batch []m.Record) (*mongo.InsertManyResult, error) {
-    if !globals.Simulate {
+    if !Simulate {
         var batchBson []interface{} // mongo does not like typing
         for _, b := range batch {
             batchBson = append(batchBson, b.ToBson())
         }

         tru := true
-        return db.InsertMany(globals.MongoTimeout(context.Background()), batchBson, &options.InsertManyOptions{Ordered: &tru})
+        return db.InsertMany(m.GlobalTimeout(context.Background()), batchBson, &options.InsertManyOptions{Ordered: &tru})
     }

     fmt.Printf("simulating successful database bulk write: %v", batch)
@@ -8,10 +8,13 @@ import (

     "git.k-space.ee/k-space/logmower-shipper/pkg/file"
     "git.k-space.ee/k-space/logmower-shipper/pkg/globals"
+    "git.k-space.ee/k-space/logmower-shipper/pkg/lines"
     m "git.k-space.ee/k-space/logmower-shipper/pkg/mongo"
+    "git.k-space.ee/k-space/logmower-shipper/pkg/sender"
     "git.k-space.ee/k-space/logmower-shipper/pkg/util"
     "github.com/fsnotify/fsnotify"
     "github.com/urfave/cli/v2"
+    mongoOpt "go.mongodb.org/mongo-driver/mongo/options"
 )

 var App = &cli.App{
@@ -20,39 +23,42 @@ var App = &cli.App{
     Authors: []*cli.Author{{Name: "jtagcat"}, {Name: "codemowers.io"}},

     Description: "Collect and ship kubernetes logs",
-    // Usage: "rubykana <input>",
     // TODO: #2: yaml
     Flags: []cli.Flag{
         &cli.BoolFlag{Name: "simulate", Aliases: []string{"dry-run"}, Usage: "Do not write to database"},
         &cli.StringFlag{Name: "log-directory", Usage: "Directory to watch for logs", Value: "/var/log/containers"},
-        &cli.IntFlag{Name: "max-record-size", Value: 128 * 1024, Usage: "Maximum record size in bytes"},
+        &cli.IntFlag{Name: "max-record-size", EnvVars: []string{"MAX_RECORD_SIZE"}, Value: 128 * 1024, Usage: "Maximum record size in bytes"},
+        &cli.IntFlag{Name: "bulk-insertion-size", EnvVars: []string{"BULK_INSERTION_SIZE"}, Value: 1000, Usage: "MongoDB bulk insertion size in records"},
+        &cli.Uint64Flag{Name: "max-connection-pool-size", EnvVars: []string{"MAX_CONNECTION_POOL_SIZE"}, Value: 1, Usage: "Max MongoDB connection pool size"},
         //
         //TODO: &cli.BoolFlag{Name: "normalize-log-level", Usage: "Normalize log.level values to Syslog defined keywords"},
         //TODO: &cli.BoolFlag{Name: "parse-json"},
         //
-        &cli.StringSliceFlag{Category: "selectors", Name: "namespace", EnvVars: []string{"KUBE_NAMESPACE"}, Usage: "whitelist filter for filenames"},
-        &cli.StringSliceFlag{Category: "selectors", Name: "pod-prefix", EnvVars: []string{"KUBE_NODE_NAME"}, Usage: "blacklist filter for filenames"},
+        &cli.StringSliceFlag{Category: "selectors", Name: "namespace", EnvVars: []string{"NAMESPACE"}, Usage: "whitelist filter for filenames"},
+        &cli.StringSliceFlag{Category: "selectors", Name: "exclude-pod-prefixes", EnvVars: []string{"EXCLUDE_POD_PREFIXES"}, Usage: "blacklist filter for filenames", Value: cli.NewStringSlice("logmower-")},
         //
         &cli.StringFlag{Category: "secrets", Name: "mongo-uri", EnvVars: []string{"MONGODB_URI"}, Usage: "mongodb://foo:bar@host:27017/database", Required: true},
     },
     Before: func(ctx *cli.Context) error {
-        globals.BufferLimitBytes = ctx.Int("max-record-size")
-        if globals.BufferLimitBytes < 1 {
+        lines.BufferLimitBytes = ctx.Int("max-record-size")
+        if lines.BufferLimitBytes < 1 {
             return fmt.Errorf("max-record-size must be positive")
         }

-        globals.Simulate = ctx.Bool("simulate")
+        sender.Simulate = ctx.Bool("simulate")
+        sender.MaxBatchItems = ctx.Int("bulk-insertion-size")

         return nil
     },

     Action: func(ctx *cli.Context) error {
-        whitelistNamespaces, blacklistPodPrefixes := sliceToMap(ctx.StringSlice("namespace")), ctx.StringSlice("pod-prefix")
+        whitelistNamespaces, blacklistPodPrefixes := sliceToMap(ctx.StringSlice("namespace")), ctx.StringSlice("exclude-pod-prefixes")
         var wg sync.WaitGroup

         log.Printf("%s %s starting", ctx.App.Name, ctx.App.Version)

-        db, err := m.Initialize(ctx.Context, ctx.String("mongo-uri"))
+        db, err := m.Initialize(ctx.Context, ctx.String("mongo-uri"), mongoOpt.Client().
+            SetMaxPoolSize(ctx.Uint64("max-connection-pool-size")))
         if err != nil {
             return fmt.Errorf("initializing database connection: %w", err)
         }
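Note: every new or renamed flag above is paired with an environment variable (MAX_RECORD_SIZE, BULK_INSERTION_SIZE, MAX_CONNECTION_POOL_SIZE, NAMESPACE, EXCLUDE_POD_PREFIXES), which is how urfave/cli's EnvVars field works: a flag can be given on the command line or through the listed variable. A stand-alone sketch of that pairing using two of the flags from this commit (the demo app itself is illustrative, not repo code):

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/urfave/cli/v2"
    )

    func main() {
        app := &cli.App{
            Flags: []cli.Flag{
                &cli.IntFlag{Name: "bulk-insertion-size", EnvVars: []string{"BULK_INSERTION_SIZE"}, Value: 1000},
                &cli.StringSliceFlag{Name: "exclude-pod-prefixes", EnvVars: []string{"EXCLUDE_POD_PREFIXES"}, Value: cli.NewStringSlice("logmower-")},
            },
            Action: func(ctx *cli.Context) error {
                fmt.Println(ctx.Int("bulk-insertion-size"), ctx.StringSlice("exclude-pod-prefixes"))
                return nil
            },
        }

        // "BULK_INSERTION_SIZE=500 ./app" and "./app --bulk-insertion-size 500" are equivalent
        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }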