From 4fa091aab465d992aa697a85e386694e358e6017 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 12:19:52 +0300 Subject: [PATCH 01/19] An io.Reader that limits the amount of bytes it can read at a time. --- pkg/util/limitedio/limitedio.go | 184 ++++++++++++++++++++++++++++++++ 1 file changed, 184 insertions(+) create mode 100644 pkg/util/limitedio/limitedio.go diff --git a/pkg/util/limitedio/limitedio.go b/pkg/util/limitedio/limitedio.go new file mode 100644 index 00000000..ccc4b72c --- /dev/null +++ b/pkg/util/limitedio/limitedio.go @@ -0,0 +1,184 @@ +package limitedio + +import ( + "bytes" + "errors" + "fmt" + "io" + "strconv" + + "github.com/weaveworks/libgitops/pkg/util/structerr" +) + +// TODO: This code is implicitely tested from pkg/frame, but in the future, +// make an unit test for this + +// DefaultMaxReadSize is 3 MB, which matches the default behavior of Kubernetes. +// (The API server only accepts request bodies of 3MB by default.) +const DefaultMaxReadSize Limit = 3 * 1024 * 1024 + +// Infinite represents no upper bound +const Infinite Limit = -1 + +// Limit describes a numeric upper bound +type Limit int64 + +func (l Limit) String() string { + if l <= 0 { + return "infinite" + } + return strconv.FormatInt(int64(l), 10) +} +func (l Limit) Int64() int64 { return int64(l) } +func (l Limit) Int() (int, error) { + i := int(l) + if int64(i) != int64(l) { + return 0, errors.New("the limit overflows int") + } + return i, nil +} + +func (l Limit) IsLessThan(len int64) bool { + // l <= 0 means "l is infinite" => limit is larger than len => not less than len + if l <= 0 { + return false + } + return l.Int64() < len +} + +func (l Limit) IsLessThanOrEqual(len int64) bool { + // l <= 0 means "l is infinite" => limit is larger than len => not less than len + if l <= 0 { + return false + } + return l.Int64() <= len +} + +// ErrReadSizeOverflow returns a new *ReadSizeOverflowError +func ErrReadSizeOverflow(maxReadSize Limit) *ReadSizeOverflowError { + return &ReadSizeOverflowError{MaxReadSize: maxReadSize} +} + +// Enforce all struct errors implementing structerr.StructError +var _ structerr.StructError = &ReadSizeOverflowError{} + +// ReadSizeOverflowError describes that a read or write has grown larger than +// allowed. It is up to the implementer to describe what a "frame" in this +// context is. This error is e.g. returned from the NewReader implementation. +// If MaxReadSize is non-zero, it is included in the error text. +// +// This error can be checked for equality using errors.Is(err, &ReadSizeOverflowError{}) +type ReadSizeOverflowError struct { + // +optional + MaxReadSize Limit +} + +func (e *ReadSizeOverflowError) Error() string { + msg := "frame was larger than maximum allowed size" + if e.MaxReadSize != 0 { + msg = fmt.Sprintf("%s %d bytes", msg, e.MaxReadSize) + } + return msg +} + +func (e *ReadSizeOverflowError) Is(target error) bool { + _, ok := target.(*ReadSizeOverflowError) + return ok +} + +// Reader is a specialized io.Reader helper type, which allows detecting when +// a read grows larger than the allowed maxReadSize, returning a ErrReadSizeOverflow in that case. +// +// Internally there's a byte counter registering how many bytes have been read using the io.Reader +// across all Read calls since the last ResetCounter reset, which resets the byte counter to 0. 
This +// means that if you have successfully read one frame within bounds of maxReadSize, and want to +// re-use the underlying io.Reader for the next frame, you shall run ResetCounter to start again. +// +// maxReadSize is specified when constructing an Reader, and defaults to DefaultMaxReadSize +// if left as the empty value 0. +// If maxReadSize is negative, the reader transparently forwards all calls without any restrictions. +// +// Note: The Reader implementation is not thread-safe, that is for higher-level interfaces +// to implement and ensure. +type Reader interface { + // The byte count returned across consecutive Read(p) calls are at maximum maxReadSize, until reset + // by ResetCounter. + io.Reader + // ResetCounter resets the byte counter counting how many bytes have been read using Read(p) + ResetCounter() +} + +// NewReader makes a new Reader implementation. +func NewReader(r io.Reader, maxReadSize Limit) Reader { + // Default maxReadSize if unset. + if maxReadSize == 0 { + maxReadSize = DefaultMaxReadSize + } + + return &ioLimitedReader{ + reader: r, + buf: new(bytes.Buffer), + maxReadSize: maxReadSize, + } +} + +type ioLimitedReader struct { + reader io.Reader + buf *bytes.Buffer + maxReadSize Limit + byteCounter int64 +} + +func (l *ioLimitedReader) Read(b []byte) (int, error) { + // If l.maxReadSize is negative, put no restrictions on the read + maxReadSize := l.maxReadSize.Int64() + if maxReadSize < 0 { + return l.reader.Read(b) + } + // If we've already read more than we're allowed to, return an overflow error + if l.byteCounter > maxReadSize { + // Keep returning this error as long as relevant + return 0, ErrReadSizeOverflow(l.maxReadSize) + + } else if l.byteCounter == maxReadSize { + // At this point we're not sure if the frame actually stops here or not + // To figure that out; read one more byte into tmp + tmp := make([]byte, 1) + tmpn, err := l.reader.Read(tmp) + + // Write the read byte into the persistent buffer, for later use when l.byteCounter < l.maxReadSize + _, _ = l.buf.Write(tmp[:tmpn]) + // Increase the byteCounter, as bytes written to buf counts as "read" + l.byteCounter += int64(tmpn) + + // If no bytes were read; it's ok as we didn't exceed the limit. Return + // the error; often nil or io.EOF in this case. + if tmpn == 0 { + return 0, err + } + // Return that the frame overflowed now, as were able to read the byte (tmpn must be 1) + return 0, ErrReadSizeOverflow(l.maxReadSize) + } // else l.byteCounter < l.maxReadSize + + // We can at maximum read bytesLeft bytes more, shrink b accordingly if b is larger than the + // maximum allowed amount to read. + bytesLeft := maxReadSize - l.byteCounter + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + // First, flush any bytes in the buffer. By convention, the writes to buf have already + // increased byteCounter, so no need to do that now. No need to check the error as buf + // only returns io.EOF, and that's not important, it's even expected in most cases. 
+ m, _ := l.buf.Read(b) + // Move the b slice forward m bytes as the m first bytes of b have now been populated + b = b[m:] + + // Read from the reader into the rest of b + n, err := l.reader.Read(b) + // Register how many bytes have been read now additionally + l.byteCounter += int64(n) + return n, err +} + +func (r *ioLimitedReader) ResetCounter() { r.byteCounter = 0 } From 8521436bdf48b27e06e90222816fa200f4b1022c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 12:21:24 +0300 Subject: [PATCH 02/19] Two small utility packages that allows for better errors and composite io --- pkg/util/compositeio/compositeio.go | 40 +++++++++++++++++++++++++++++ pkg/util/structerr/structerr.go | 13 ++++++++++ 2 files changed, 53 insertions(+) create mode 100644 pkg/util/compositeio/compositeio.go create mode 100644 pkg/util/structerr/structerr.go diff --git a/pkg/util/compositeio/compositeio.go b/pkg/util/compositeio/compositeio.go new file mode 100644 index 00000000..589ba1c4 --- /dev/null +++ b/pkg/util/compositeio/compositeio.go @@ -0,0 +1,40 @@ +package compositeio + +import ( + "fmt" + "io" + + "github.com/weaveworks/libgitops/pkg/tracing" +) + +// ReadCloser returns a composite io.ReadCloser from the given io.Reader and io.Closer +func ReadCloser(r io.Reader, c io.Closer) io.ReadCloser { + return readCloser{r, c} +} + +type readCloser struct { + io.Reader + io.Closer +} + +func (rc readCloser) TracerName() string { + return fmt.Sprintf("compositeio.readCloser{%T, %T}", rc.Reader, rc.Closer) +} + +var _ tracing.TracerNamed = readCloser{} + +// WriteCloser returns a composite io.ReadCloser from the given io.Reader and io.Closer +func WriteCloser(w io.Writer, c io.Closer) io.WriteCloser { + return writeCloser{w, c} +} + +type writeCloser struct { + io.Writer + io.Closer +} + +func (wc writeCloser) TracerName() string { + return fmt.Sprintf("compositeio.writeCloser{%T, %T}", wc.Writer, wc.Closer) +} + +var _ tracing.TracerNamed = writeCloser{} diff --git a/pkg/util/structerr/structerr.go b/pkg/util/structerr/structerr.go new file mode 100644 index 00000000..e135b640 --- /dev/null +++ b/pkg/util/structerr/structerr.go @@ -0,0 +1,13 @@ +package structerr + +// StructError is an interface for errors that are structs, and can be compared for +// errors.Is equality. Equality is determined by type equality, i.e. if the pointer +// receiver is *MyError and target can be successfully casted using target.(*MyError), +// then target and the pointer reciever error are equal, otherwise not. +// +// This is needed because errors.Is does not support equality like this for structs +// by default. 
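// An illustrative sketch (not part of this patch) of the pattern StructError
// captures, assuming a caller-defined *MyError type:
//
//	type MyError struct{ Path string }
//
//	func (e *MyError) Error() string { return "operation failed for " + e.Path }
//	func (e *MyError) Is(target error) bool {
//		_, ok := target.(*MyError)
//		return ok
//	}
//
// With that Is method in place, errors.Is(err, &MyError{}) matches any *MyError
// value regardless of its fields; ReadSizeOverflowError in patch 01 uses the
// same approach.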
+type StructError interface { + error + Is(target error) bool +} From 3d34ae5531e86070752c487ecb7dd37d6dbf0e0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:06:11 +0300 Subject: [PATCH 03/19] High-level pattern for building TracerProviders --- pkg/tracing/tracer_provider.go | 251 +++++++++++++++++++++++++++++++++ 1 file changed, 251 insertions(+) create mode 100644 pkg/tracing/tracer_provider.go diff --git a/pkg/tracing/tracer_provider.go b/pkg/tracing/tracer_provider.go new file mode 100644 index 00000000..92a8d16d --- /dev/null +++ b/pkg/tracing/tracer_provider.go @@ -0,0 +1,251 @@ +package tracing + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" +) + +// SDKTracerProvider represents a TracerProvider that is generated from the OpenTelemetry +// SDK and hence can be force-flushed and shutdown (which in both cases flushes all async, +// batched traces before stopping). +type SDKTracerProvider interface { + trace.TracerProvider + Shutdown(ctx context.Context) error + ForceFlush(ctx context.Context) error +} + +// NewBuilder returns a new TracerProviderBuilder instance. +func NewBuilder() TracerProviderBuilder { + return &builder{} +} + +// TracerProviderBuilder is a builder for a TracerProviderWithShutdown. +type TracerProviderBuilder interface { + // RegisterInsecureOTelExporter registers an exporter to an OpenTelemetry Collector on the + // given address, which defaults to "localhost:55680" if addr is empty. The OpenTelemetry + // Collector speaks gRPC, hence, don't add any "http(s)://" prefix to addr. The OpenTelemetry + // Collector is just a proxy, it in turn can forward e.g. traces to Jaeger and metrics to + // Prometheus. Additional options can be supplied that can override the default behavior. + RegisterInsecureOTelExporter(ctx context.Context, addr string, opts ...otlptracegrpc.Option) TracerProviderBuilder + + // RegisterInsecureJaegerExporter registers an exporter to Jaeger using Jaeger's own HTTP API. + // The default address is "http://localhost:14268/api/traces" if addr is left empty. + // Additional options can be supplied that can override the default behavior. + RegisterInsecureJaegerExporter(addr string, opts ...jaeger.CollectorEndpointOption) TracerProviderBuilder + + // RegisterStdoutExporter exports pretty-formatted telemetry data to os.Stdout, or another writer if + // stdouttrace.WithWriter(w) is supplied as an option. Note that stdouttrace.WithoutTimestamps() doesn't + // work due to an upstream bug in OpenTelemetry. TODO: Fix that issue upstream. + RegisterStdoutExporter(opts ...stdouttrace.Option) TracerProviderBuilder + + // WithOptions allows configuring the TracerProvider in various ways, e.g. tracesdk.WithSpanProcessor(sp) + // or tracesdk.WithIDGenerator() + WithOptions(opts ...tracesdk.TracerProviderOption) TracerProviderBuilder + + // WithAttributes allows registering more default attributes for traces created by this TracerProvider. + // By default semantic conventions of version v1.4.0 are used, with "service.name" => "libgitops". 
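	// For example (illustrative), the default service name can be overridden with
	// WithAttributes(semconv.ServiceNameKey.String("my-service")).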
+ WithAttributes(attrs ...attribute.KeyValue) TracerProviderBuilder + + // WithSynchronousExports allows configuring whether the exporters should export in synchronous mode + // (which must be used ONLY for testing) or (by default) the batching mode. + WithSynchronousExports(sync bool) TracerProviderBuilder + + WithLogging(log bool) TracerProviderBuilder + + // Build builds the SDKTracerProvider. + Build() (SDKTracerProvider, error) + + // InstallGlobally builds the TracerProvider and registers it globally using otel.SetTracerProvider(tp). + InstallGlobally() error +} + +type builder struct { + exporters []tracesdk.SpanExporter + errs []error + tpOpts []tracesdk.TracerProviderOption + attrs []attribute.KeyValue + sync bool + log bool +} + +func (b *builder) RegisterInsecureOTelExporter(ctx context.Context, addr string, opts ...otlptracegrpc.Option) TracerProviderBuilder { + if len(addr) == 0 { + addr = "localhost:55680" + } + + defaultOpts := []otlptracegrpc.Option{ + otlptracegrpc.WithEndpoint(addr), + otlptracegrpc.WithInsecure(), + } + // Make sure to order the defaultOpts first, so opts can override the default ones + opts = append(defaultOpts, opts...) + // Run the main constructor for the otlptracegrpc exporter + exp, err := otlptracegrpc.New(ctx, opts...) + b.exporters = append(b.exporters, exp) + b.errs = append(b.errs, err) + return b +} + +func (b *builder) RegisterInsecureJaegerExporter(addr string, opts ...jaeger.CollectorEndpointOption) TracerProviderBuilder { + defaultOpts := []jaeger.CollectorEndpointOption{} + // Only override if addr is set. Default is "http://localhost:14268/api/traces" + if len(addr) != 0 { + defaultOpts = append(defaultOpts, jaeger.WithEndpoint(addr)) + } + // Make sure to order the defaultOpts first, so opts can override the default ones + opts = append(defaultOpts, opts...) + // Run the main constructor for the jaeger exporter + exp, err := jaeger.New(jaeger.WithCollectorEndpoint(opts...)) + b.exporters = append(b.exporters, exp) + b.errs = append(b.errs, err) + return b +} + +func (b *builder) RegisterStdoutExporter(opts ...stdouttrace.Option) TracerProviderBuilder { + defaultOpts := []stdouttrace.Option{ + stdouttrace.WithPrettyPrint(), + } + // Make sure to order the defaultOpts first, so opts can override the default ones + opts = append(defaultOpts, opts...) + // Run the main constructor for the stdout exporter + exp, err := stdouttrace.New(opts...) + b.exporters = append(b.exporters, exp) + b.errs = append(b.errs, err) + return b +} + +func (b *builder) WithOptions(opts ...tracesdk.TracerProviderOption) TracerProviderBuilder { + b.tpOpts = append(b.tpOpts, opts...) + return b +} + +func (b *builder) WithAttributes(attrs ...attribute.KeyValue) TracerProviderBuilder { + b.attrs = append(b.attrs, attrs...) + return b +} + +func (b *builder) WithSynchronousExports(sync bool) TracerProviderBuilder { + b.sync = sync + return b +} + +func (b *builder) WithLogging(log bool) TracerProviderBuilder { + b.log = log + return b +} + +var ErrNoExportersProvided = errors.New("no exporters provided") + +func (b *builder) Build() (SDKTracerProvider, error) { + // Combine and filter the errors from the exporter building + if err := multierr.Combine(b.errs...); err != nil { + return nil, err + } + if len(b.exporters) == 0 { + return nil, ErrNoExportersProvided + } + // TODO: Require at least one exporter + + // By default, set the service name to "libgitops". 
+ // This can be overridden through WithAttributes + defaultAttrs := []attribute.KeyValue{ + semconv.ServiceNameKey.String("libgitops"), + } + // Make sure to order the defaultAttrs first, so b.attrs can override the default ones + attrs := append(defaultAttrs, b.attrs...) + + // By default, register a resource with the given attributes + defaultTpOpts := []tracesdk.TracerProviderOption{ + // Record information about this application in an Resource. + tracesdk.WithResource(resource.NewWithAttributes(semconv.SchemaURL, attrs...)), + } + + // Register all exporters with the options list + for _, exporter := range b.exporters { + // The non-syncing mode shall only be used in testing. The batching mode must be used in production. + if b.sync { + defaultTpOpts = append(defaultTpOpts, tracesdk.WithSyncer(exporter)) + } else { + defaultTpOpts = append(defaultTpOpts, tracesdk.WithBatcher(exporter)) + } + } + + // Make sure to order the defaultTpOpts first, so b.tpOpts can override the default ones + opts := append(defaultTpOpts, b.tpOpts...) + // Build the tracing provider + tpsdk := tracesdk.NewTracerProvider(opts...) + if b.log { + return NewLoggingTracerProvider(tpsdk), nil + } + return tpsdk, nil +} + +func (b *builder) InstallGlobally() error { + // First, build the tracing provider... + tp, err := b.Build() + if err != nil { + return err + } + // ... and register it globally + otel.SetTracerProvider(tp) + return nil +} + +// Shutdown tries to convert the trace.TracerProvider to a SDKTracerProvider to +// access its Shutdown method to make sure all traces have been flushed using the exporters +// before it's shutdown. If timeout == 0, the shutdown will be done without a grace period. +// If timeout > 0, the shutdown will have a grace period of that period of time to shutdown. +func Shutdown(ctx context.Context, tp trace.TracerProvider, timeout time.Duration) error { + return callSDKProvider(ctx, tp, timeout, func(ctx context.Context, sp SDKTracerProvider) error { + return sp.Shutdown(ctx) + }) +} + +// ForceFlush tries to convert the trace.TracerProvider to a SDKTracerProvider to +// access its ForceFlush method to make sure all traces have been flushed using the exporters. +// If timeout == 0, the flushing will be done without a grace period. +// If timeout > 0, the flushing will have a grace period of that period of time. +// Unlike Shutdown, which also flushes the traces, the provider is still operation after this. +func ForceFlush(ctx context.Context, tp trace.TracerProvider, timeout time.Duration) error { + return callSDKProvider(ctx, tp, timeout, func(ctx context.Context, sp SDKTracerProvider) error { + return sp.ForceFlush(ctx) + }) +} + +func callSDKProvider(ctx context.Context, tp trace.TracerProvider, timeout time.Duration, fn func(context.Context, SDKTracerProvider) error) error { + p, ok := tp.(SDKTracerProvider) + if !ok { + return nil + } + + if timeout != 0 { + // Do not make the application hang when it is shutdown. 
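		// A non-zero timeout bounds the Shutdown/ForceFlush call with a deadline;
		// a zero timeout forwards the caller's context unchanged.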
+ var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + return fn(ctx, p) +} + +// ShutdownGlobal shuts down the global TracerProvider using Shutdown() +func ShutdownGlobal(ctx context.Context, timeout time.Duration) error { + return Shutdown(ctx, otel.GetTracerProvider(), timeout) +} + +// ForceFlushGlobal flushes the global TracerProvider using ForceFlush() +func ForceFlushGlobal(ctx context.Context, timeout time.Duration) error { + return ForceFlush(ctx, otel.GetTracerProvider(), timeout) +} From 351dddc88b41913c191deb4b0b4a35687540f73f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:06:45 +0300 Subject: [PATCH 04/19] High-level tracing using a closure function --- pkg/tracing/tracing.go | 203 ++++++++++++++++++++++++++++++++++++ pkg/tracing/tracing_test.go | 54 ++++++++++ 2 files changed, 257 insertions(+) create mode 100644 pkg/tracing/tracing.go create mode 100644 pkg/tracing/tracing_test.go diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go new file mode 100644 index 00000000..5ae07f13 --- /dev/null +++ b/pkg/tracing/tracing.go @@ -0,0 +1,203 @@ +package tracing + +import ( + "context" + "errors" + "fmt" + "io" + "os" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" +) + +// FuncTracer is a higher-level type than the core trace.Tracer, which allows instrumenting +// a function running in a closure. It'll automatically create a span with the given name +// (plus maybe a pre-configured prefix). TraceFunc also returns a TraceFuncResult which allows +// the error to be instrumented automatically as well. +type FuncTracer interface { + trace.Tracer + // TraceFunc creates a trace with the given name while fn is executing. + // ErrFuncNotSupplied is returned if fn is nil. + TraceFunc(ctx context.Context, spanName string, fn TraceFunc, opts ...trace.SpanStartOption) TraceFuncResult +} + +// Context returns context.Background() if traceEnable is false (i.e. no tracing will happen), +// or a context that will report traces to the global TracerProvider if traceEnable is true. +func Context(traceEnable bool) context.Context { + if !traceEnable { + return context.Background() + } + + ctx := context.Background() + return trace.ContextWithSpan(ctx, &tracerProviderSpan{ + Span: trace.SpanFromContext(ctx), // will return a no-op span + useGlobal: true, + }) +} + +type tracerProviderSpan struct { + trace.Span + useGlobal bool +} + +func (s *tracerProviderSpan) TracerProvider() trace.TracerProvider { + // Override the TracerProvider call if useGlobal is set + if s.useGlobal { + return otel.GetTracerProvider() + } + return s.Span.TracerProvider() +} + +// TracerNamed is an interface that allows types to customize their +// name shown in traces. +type TracerNamed interface { + TracerName() string +} + +// FromContextUnnamed returns an unnamed FuncTracer. +func FromContextUnnamed(ctx context.Context) FuncTracer { + return FromContext(ctx, nil) +} + +// FromContext returns a FuncTracer from the context, along with a name described by +// obj. If obj is a string, that name is used. If obj is a TracerNamed, TracerName() is used, +// if it's os.Std{in,out,err}, "os.Std{in,out,err}" is used, and likewise for io.Discard. +// If obj is something else, the name is its type printed as fmt.Sprintf("%T", obj). If obj +// is nil, then it is unnamed. 
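// For example (matching the behavior above and the accompanying tests):
// FromContext(ctx, os.Stdout) names the tracer "os.Stdout", a *bytes.Buffer is
// named "*bytes.Buffer", and FromContext(ctx, "my-component") yields "my-component".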
+func FromContext(ctx context.Context, obj interface{}) FuncTracer { + return FromProvider(trace.SpanFromContext(ctx).TracerProvider(), obj) +} + +// FromProvider makes a new FuncTracer with the name resolved as for FromContext. +func FromProvider(tp trace.TracerProvider, obj interface{}) FuncTracer { + name := tracerName(obj) + return funcTracer{name: name, tracer: tp.Tracer(name)} +} + +func tracerName(obj interface{}) string { + var name string + switch t := obj.(type) { + case string: + name = t + case TracerNamed: + name = t.TracerName() + case nil: + name = "" + default: + name = fmt.Sprintf("%T", obj) + } + + switch obj { + case os.Stdin: + name = "os.Stdin" + case os.Stdout: + name = "os.Stdout" + case os.Stderr: + name = "os.Stderr" + case io.Discard: + name = "io.Discard" + } + return name +} + +// TraceFuncResult can either just simply return the error from TraceFunc, or register the error using +// DefaultErrRegisterFunc (and then return it), or register the error using a custom error handling function. +// Important: The user MUST run one of these functions for the span to end. +// If none of these functions are called and hence the span is not ended, memory is leaked. +type TraceFuncResult interface { + // Error returns the error without any registration of it to the span. + Error() error + // Register registers the error using DefaultErrRegisterFunc. + Register() error + // RegisterCustom registers the error with the span using fn. + // ErrFuncNotSupplied is returned if fn is nil. + RegisterCustom(fn ErrRegisterFunc) error +} + +// ErrFuncNotSupplied is raised when a supplied function callback is nil. +var ErrFuncNotSupplied = errors.New("function argument not supplied") + +// MakeFuncNotSuppliedError formats ErrFuncNotSupplied in a standard way. +func MakeFuncNotSuppliedError(name string) error { + return fmt.Errorf("%w: %s", ErrFuncNotSupplied, name) +} + +// TraceFunc represents an instrumented function closure. +type TraceFunc func(context.Context, trace.Span) error + +// ErrRegisterFunc should register the return error of TraceFunc err with the span +type ErrRegisterFunc func(span trace.Span, err error) + +// funcTracer contains options for creating a trace.Tracer and FuncTracer. +type funcTracer struct { + name string + tracer trace.Tracer +} + +// SpanName appends the name of the given function (spanName) to the tracer +// name, if set. +func (o funcTracer) fmtSpanName(spanName string) string { + if len(o.name) != 0 && len(spanName) != 0 { + return o.name + "." + spanName + } + // As either (or both) o.Name and spanName are empty strings, we can add them together + name := o.name + spanName + if len(name) != 0 { + return name + } + return "" +} + +func (o funcTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return o.tracer.Start(ctx, o.fmtSpanName(spanName), opts...) +} + +func (o funcTracer) TraceFunc(ctx context.Context, spanName string, fn TraceFunc, opts ...trace.SpanStartOption) TraceFuncResult { + ctx, span := o.Start(ctx, spanName, opts...) 
+ // Close the span first in the returned TraceFuncResult, to be able to register the error before + // the span stops recording events + + if fn == nil { + return &traceFuncResult{MakeFuncNotSuppliedError("FuncTracer.TraceFunc"), span} + } + return &traceFuncResult{fn(ctx, span), span} +} + +type traceFuncResult struct { + err error + span trace.Span +} + +func (r *traceFuncResult) Error() error { + // Important: Remember to end the span + r.span.End() + return r.err +} + +func (r *traceFuncResult) Register() error { + return r.RegisterCustom(DefaultErrRegisterFunc) +} + +func (r *traceFuncResult) RegisterCustom(fn ErrRegisterFunc) error { + if fn == nil { + err := multierr.Combine(r.err, MakeFuncNotSuppliedError("TraceFuncResult.RegisterCustom")) + DefaultErrRegisterFunc(r.span, err) + return err + } + + // Register the error with the span + fn(r.span, r.err) + // Important: Remember to end the span + r.span.End() + return r.err +} + +// DefaultErrRegisterFunc registers the error with the span using span.RecordError(err) +// if the error is non-nil, and then returns the error unchanged. +func DefaultErrRegisterFunc(span trace.Span, err error) { + if err != nil { + span.RecordError(err) + } +} diff --git a/pkg/tracing/tracing_test.go b/pkg/tracing/tracing_test.go new file mode 100644 index 00000000..102636e4 --- /dev/null +++ b/pkg/tracing/tracing_test.go @@ -0,0 +1,54 @@ +package tracing + +import ( + "bytes" + "fmt" + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_tracerName(t *testing.T) { + tests := []struct { + obj interface{} + want string + }{ + {"foo", "foo"}, + {trNamed{"bar"}, "bar"}, + {nil, ""}, + {bytes.NewBuffer(nil), "*bytes.Buffer"}, + {os.Stdin, "os.Stdin"}, + {os.Stdout, "os.Stdout"}, + {os.Stderr, "os.Stderr"}, + {io.Discard, "io.Discard"}, + } + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + assert.Equal(t, tt.want, tracerName(tt.obj)) + }) + } +} + +type trNamed struct{ name string } + +func (t trNamed) TracerName() string { return t.name } + +func Test_funcTracer_fmtSpanName(t *testing.T) { + tests := []struct { + tracerName string + fnName string + want string + }{ + {tracerName: "Tracer", fnName: "Func", want: "Tracer.Func"}, + {tracerName: "", fnName: "Func", want: "Func"}, + {tracerName: "Tracer", fnName: "", want: "Tracer"}, + {tracerName: "", fnName: "", want: ""}, + } + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + assert.Equal(t, tt.want, funcTracer{name: tt.tracerName}.fmtSpanName(tt.fnName)) + }) + } +} From d8d949714611c6039ff5a586762478bae0d56990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:08:42 +0300 Subject: [PATCH 05/19] Automatic logging generation from traces --- pkg/tracing/logging.go | 127 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 pkg/tracing/logging.go diff --git a/pkg/tracing/logging.go b/pkg/tracing/logging.go new file mode 100644 index 00000000..0a5ffddb --- /dev/null +++ b/pkg/tracing/logging.go @@ -0,0 +1,127 @@ +package tracing + +import ( + "context" + + "github.com/go-logr/logr" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" +) + +// TODO: Use this logging tracer provider to unit test the traces generated, and code executing generally + +// TODO: Allow fine-grained logging levels. 
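// An illustrative end-to-end sketch (error handling elided; doWork is a
// hypothetical function) of how the builder from patch 03, the FuncTracer from
// patch 04 and this logging wrapper are meant to compose:
//
//	_ = tracing.NewBuilder().
//		RegisterStdoutExporter().
//		WithLogging(true).
//		InstallGlobally()
//
//	ctx := tracing.Context(true)
//	err := tracing.FromContext(ctx, "MyComponent").
//		TraceFunc(ctx, "DoWork", func(ctx context.Context, span trace.Span) error {
//			return doWork(ctx)
//		}).Register()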
+ +// NewLoggingTracerProvider is a composite TracerProvider which automatically logs trace events +// created by trace spans using a logger given to the context using logr, or as configured by controller +// runtime. +func NewLoggingTracerProvider(tp trace.TracerProvider) SDKTracerProvider { + return &loggingTracerProvider{tp} +} + +type loggingTracerProvider struct { + tp trace.TracerProvider +} + +func (tp *loggingTracerProvider) Tracer(instrumentationName string, opts ...trace.TracerOption) trace.Tracer { + tracer := tp.tp.Tracer(instrumentationName, opts...) + return &loggingTracer{provider: tp, tracer: tracer, name: instrumentationName} +} + +func (tp *loggingTracerProvider) Shutdown(ctx context.Context) error { + p, ok := tp.tp.(SDKTracerProvider) + if !ok { + return nil + } + return p.Shutdown(ctx) +} + +func (tp *loggingTracerProvider) ForceFlush(ctx context.Context) error { + p, ok := tp.tp.(SDKTracerProvider) + if !ok { + return nil + } + return p.ForceFlush(ctx) +} + +type loggingTracer struct { + provider trace.TracerProvider + tracer trace.Tracer + name string +} + +func (t *loggingTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + // Acquire the logger from either the context or controller-runtime global + log := ctrllog.FromContext(ctx).WithName(t.name) + + // When starting up, log all given attributes. + spanCfg := trace.NewSpanStartConfig(opts...) + startLog := log + if len(spanCfg.Attributes()) != 0 { + startLog = startLog.WithValues(spanAttributesKey, spanCfg.Attributes()) + } + startLog.Info("starting span") + + // Call the composite tracer, but swap out the returned span for ours, both in the + // return value and context. + ctx, span := t.tracer.Start(ctx, spanName, opts...) + logSpan := &loggingSpan{t.provider, log, span, spanName} + ctx = trace.ContextWithSpan(ctx, logSpan) + return ctx, logSpan +} + +type loggingSpan struct { + provider trace.TracerProvider + log logr.Logger + span trace.Span + spanName string +} + +const ( + spanNameKey = "span-name" + spanEventKey = "span-event" + spanStatusCodeKey = "span-status-code" + spanStatusDescriptionKey = "span-status-description" + spanAttributesKey = "span-attributes" +) + +func (s *loggingSpan) End(options ...trace.SpanEndOption) { + s.log.Info("ending span") + s.span.End(options...) +} + +func (s *loggingSpan) AddEvent(name string, options ...trace.EventOption) { + s.log.Info("recorded span event", spanEventKey, name) + s.span.AddEvent(name, options...) +} + +func (s *loggingSpan) IsRecording() bool { return s.span.IsRecording() } + +func (s *loggingSpan) RecordError(err error, options ...trace.EventOption) { + s.log.Error(err, "recorded span error") + s.span.RecordError(err, options...) +} + +func (s *loggingSpan) SpanContext() trace.SpanContext { return s.span.SpanContext() } + +func (s *loggingSpan) SetStatus(code codes.Code, description string) { + s.log.Info("recorded span status change", + spanStatusCodeKey, code.String(), + spanStatusDescriptionKey, description) + s.span.SetStatus(code, description) +} + +func (s *loggingSpan) SetName(name string) { + s.log.Info("recorded span name change", spanNameKey, name) + s.log = s.log.WithValues(spanNameKey, name) + s.span.SetName(name) +} + +func (s *loggingSpan) SetAttributes(kv ...attribute.KeyValue) { + s.log.Info("recorded span attribute change", spanAttributesKey, kv) + s.span.SetAttributes(kv...) 
+} + +func (s *loggingSpan) TracerProvider() trace.TracerProvider { return s.provider } From b3882e976ee149e5df545f8d5e5e4d193028050f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:14:20 +0300 Subject: [PATCH 06/19] Implement YAML and JSON sanitation; move the comments copying code here --- .../sanitize}/comments/LICENSE | 0 .../sanitize}/comments/comments.go | 0 .../sanitize}/comments/comments_test.go | 3 +- .../sanitize}/comments/lost.go | 0 pkg/frame/sanitize/sanitize.go | 231 +++++++++ pkg/frame/sanitize/sanitize_test.go | 460 ++++++++++++++++++ 6 files changed, 693 insertions(+), 1 deletion(-) rename pkg/{serializer => frame/sanitize}/comments/LICENSE (100%) rename pkg/{serializer => frame/sanitize}/comments/comments.go (100%) rename pkg/{serializer => frame/sanitize}/comments/comments_test.go (99%) rename pkg/{serializer => frame/sanitize}/comments/lost.go (100%) create mode 100644 pkg/frame/sanitize/sanitize.go create mode 100644 pkg/frame/sanitize/sanitize_test.go diff --git a/pkg/serializer/comments/LICENSE b/pkg/frame/sanitize/comments/LICENSE similarity index 100% rename from pkg/serializer/comments/LICENSE rename to pkg/frame/sanitize/comments/LICENSE diff --git a/pkg/serializer/comments/comments.go b/pkg/frame/sanitize/comments/comments.go similarity index 100% rename from pkg/serializer/comments/comments.go rename to pkg/frame/sanitize/comments/comments.go diff --git a/pkg/serializer/comments/comments_test.go b/pkg/frame/sanitize/comments/comments_test.go similarity index 99% rename from pkg/serializer/comments/comments_test.go rename to pkg/frame/sanitize/comments/comments_test.go index 233feeec..dfd874bb 100644 --- a/pkg/serializer/comments/comments_test.go +++ b/pkg/frame/sanitize/comments/comments_test.go @@ -226,7 +226,8 @@ items: - c - b `, - }, { + }, + { name: "copy_item_comments_no_match", from: ` apiVersion: apps/v1 diff --git a/pkg/serializer/comments/lost.go b/pkg/frame/sanitize/comments/lost.go similarity index 100% rename from pkg/serializer/comments/lost.go rename to pkg/frame/sanitize/comments/lost.go diff --git a/pkg/frame/sanitize/sanitize.go b/pkg/frame/sanitize/sanitize.go new file mode 100644 index 00000000..90547db9 --- /dev/null +++ b/pkg/frame/sanitize/sanitize.go @@ -0,0 +1,231 @@ +package sanitize + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "strings" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments" + "k8s.io/utils/pointer" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Sanitizer is an interface for sanitizing frames. Note that a sanitizer can only do +// its work correctly if frame actually only contains one frame within. +type Sanitizer interface { + // Sanitize sanitizes the frame in a standardized way for the given + // FramingType. If the FramingType isn't known, the Sanitizer can choose between + // returning an ErrUnsupportedFramingType error or just returning frame, nil unmodified. + // If ErrUnsupportedFramingType is returned, the consumer won't probably be able to handle + // other framing types than the default ones, which might not be desired. + // + // The returned frame should have len == 0 if it's considered empty. + Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error) + + content.ContentTypeSupporter +} + +// defaultSanitizer implements frame sanitation for JSON and YAML. 
+// +// For YAML it removes unnecessary "---" separators, whitespace and newlines. +// The YAML frame always ends with a newline, unless the sanitized YAML was an empty string, in which +// case an empty string with len == 0 will be returned. +// +// For JSON it sanitizes the JSON frame by removing unnecessary spaces and newlines around it. +func NewJSONYAML(opts ...JSONYAMLOption) Sanitizer { + return &defaultSanitizer{defaultJSONYAMLOptions().applyOptions(opts)} +} + +func WithCompactIndent() JSONYAMLOption { + return WithSpacesIndent(0) +} + +func WithSpacesIndent(spaces uint8) JSONYAMLOption { + i := strings.Repeat(" ", int(spaces)) + return &jsonYAMLOptions{Indentation: &i} +} + +func WithTabsIndent(tabs uint8) JSONYAMLOption { + i := strings.Repeat("\t", int(tabs)) + return &jsonYAMLOptions{Indentation: &i} +} + +func WithCompactSeqIndent() JSONYAMLOption { + return &jsonYAMLOptions{ForceSeqIndentStyle: yaml.CompactSequenceStyle} +} + +func WithWideSeqIndent() JSONYAMLOption { + return &jsonYAMLOptions{ForceSeqIndentStyle: yaml.WideSequenceStyle} +} + +func WithNoCommentsCopy() JSONYAMLOption { + return &jsonYAMLOptions{CopyComments: pointer.Bool(false)} +} + +type JSONYAMLOption interface { + applyToJSONYAML(*jsonYAMLOptions) +} + +type jsonYAMLOptions struct { + // Only applicable to JSON at the moment; YAML indentation config not supported + Indentation *string + // Only applicable to YAML; either yaml.CompactSequenceStyle or yaml.WideSequenceStyle + ForceSeqIndentStyle yaml.SequenceIndentStyle + // Only applicable to YAML; JSON doesn't support comments + CopyComments *bool + /* + TODO: ForceMapKeyOrder that can either be + - PreserveOrder (default) => preserves the order from the prior if given. no-op if no prior. + - Alphabetic => sorts all keys alphabetically + - None => don't preserve order from the prior; no-op + */ +} + +func defaultJSONYAMLOptions() *jsonYAMLOptions { + return (&jsonYAMLOptions{ + Indentation: pointer.String(""), + CopyComments: pointer.Bool(true), + }) +} + +func (o *jsonYAMLOptions) applyToJSONYAML(target *jsonYAMLOptions) { + if o.Indentation != nil { + target.Indentation = o.Indentation + } + if len(o.ForceSeqIndentStyle) != 0 { + target.ForceSeqIndentStyle = o.ForceSeqIndentStyle + } + if o.CopyComments != nil { + target.CopyComments = o.CopyComments + } +} + +func (o *jsonYAMLOptions) applyOptions(opts []JSONYAMLOption) *jsonYAMLOptions { + for _, opt := range opts { + opt.applyToJSONYAML(o) + } + return o +} + +type defaultSanitizer struct { + opts *jsonYAMLOptions +} + +func (s *defaultSanitizer) Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error) { + switch ct { + case content.ContentTypeYAML: + return s.handleYAML(ctx, frame) + case content.ContentTypeJSON: + return s.handleJSON(frame) + default: + // Just passthrough + return frame, nil + } +} + +func (defaultSanitizer) SupportedContentTypes() content.ContentTypes { + return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON} +} + +var ErrTooManyFrames = errors.New("too many frames") + +func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte, error) { + // Get prior data, if any (from the context), that we'll use to copy comments over and + // infer the sequence indenting style. 
+ priorData, hasPriorData := GetPriorData(ctx) + + // Parse the current node + frameNodes, err := (&kio.ByteReader{ + Reader: bytes.NewReader(append([]byte{'\n'}, frame...)), + DisableUnwrapping: true, + OmitReaderAnnotations: true, + }).Read() + if err != nil { + return nil, err + } + if len(frameNodes) == 0 { + return []byte{}, nil + } else if len(frameNodes) != 1 { + return nil, ErrTooManyFrames + } + frameNode := frameNodes[0] + + if hasPriorData && s.opts.CopyComments != nil && *s.opts.CopyComments { + priorNode, err := yaml.Parse(string(priorData)) + if err != nil { + return nil, err + } + // Copy comments over + if err := comments.CopyComments(priorNode, frameNode, true); err != nil { + return nil, err + } + } + + return yaml.MarshalWithOptions(frameNode.Document(), &yaml.EncoderOptions{ + SeqIndent: s.resolveSeqStyle(frame, priorData, hasPriorData), + }) +} + +func (s *defaultSanitizer) resolveSeqStyle(frame, priorData []byte, hasPriorData bool) yaml.SequenceIndentStyle { + // If specified, use these; can be used as "force-formatting" directives for consistency + if len(s.opts.ForceSeqIndentStyle) != 0 { + return s.opts.ForceSeqIndentStyle + } + // Otherwise, autodetect the indentation from prior data, if exists, or the current frame + // If the sequence style cannot be derived; the compact form will be used + var deriveYAML string + if hasPriorData { + deriveYAML = string(priorData) + } else { + deriveYAML = string(frame) + } + return yaml.SequenceIndentStyle(yaml.DeriveSeqIndentStyle(deriveYAML)) +} + +func (s *defaultSanitizer) handleJSON(frame []byte) ([]byte, error) { + // If it's all whitespace, just return an empty byte array, no actual content here + if len(bytes.TrimSpace(frame)) == 0 { + return []byte{}, nil + } + var buf bytes.Buffer + var err error + if s.opts.Indentation == nil || len(*s.opts.Indentation) == 0 { + err = json.Compact(&buf, frame) + } else { + err = json.Indent(&buf, frame, "", *s.opts.Indentation) + } + if err != nil { + return nil, err + } + // Trim all other spaces than an ending newline + return append(bytes.TrimSpace(buf.Bytes()), '\n'), nil +} + +func IfSupported(ctx context.Context, s Sanitizer, ct content.ContentType, frame []byte) ([]byte, error) { + // If the content type isn't supported, nothing to do + if s == nil || !s.SupportedContentTypes().Has(ct) { + return frame, nil + } + return s.Sanitize(ctx, ct, frame) +} + +// WithPriorData registers the given frame with the context such that the frame can be used +// as "prior data" when sanitizing. Prior data can be used to copy over YAML comments +// automatically from the prior data, remember the key order, sequence indentation level, etc. +func WithPriorData(ctx context.Context, frame []byte) context.Context { + return context.WithValue(ctx, priorDataKey, frame) +} + +// GetPriorData retrieves the prior data frame, if any, set using WithPriorData. 
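// A minimal round-trip sketch (illustrative; priorFrame and newFrame are assumed
// to hold the previously read and the re-serialized YAML of the same object):
//
//	s := NewJSONYAML()
//	ctx := WithPriorData(context.Background(), priorFrame)
//	out, err := s.Sanitize(ctx, content.ContentTypeYAML, newFrame)
//
// Comments and sequence indentation from priorFrame are then carried over to out,
// as exercised by the "copy comments; infer seq style from prior" test case below.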
+func GetPriorData(ctx context.Context) ([]byte, bool) { + b, ok := ctx.Value(priorDataKey).([]byte) + return b, ok +} + +type priorDataKeyStruct struct{} + +var priorDataKey = priorDataKeyStruct{} diff --git a/pkg/frame/sanitize/sanitize_test.go b/pkg/frame/sanitize/sanitize_test.go new file mode 100644 index 00000000..cb8682a3 --- /dev/null +++ b/pkg/frame/sanitize/sanitize_test.go @@ -0,0 +1,460 @@ +package sanitize + +import ( + "context" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaveworks/libgitops/pkg/content" +) + +func Test_defaultSanitizer_Sanitize(t *testing.T) { + tests := []struct { + name string + opts []JSONYAMLOption + ct content.ContentType + prior string + frame string + want string + wantErr error + checkErr func(error) bool + }{ + { + name: "passthrough whatever", + ct: content.ContentType("unknown"), + frame: "{randomdata:", + want: "{randomdata:", + }, + { + name: "default compact", + ct: content.ContentTypeJSON, + frame: `{ + "foo": { + "bar": "baz" + } + }`, + opts: []JSONYAMLOption{}, + want: `{"foo":{"bar":"baz"}} +`, + }, + { + name: "with two spaces", + ct: content.ContentTypeJSON, + frame: ` { "foo" : "bar" } +`, + opts: []JSONYAMLOption{WithSpacesIndent(2)}, + want: `{ + "foo": "bar" +} +`, + }, + { + name: "with four spaces", + ct: content.ContentTypeJSON, + frame: ` { "foo" : {"bar": "baz"} } +`, + opts: []JSONYAMLOption{WithSpacesIndent(4)}, + want: `{ + "foo": { + "bar": "baz" + } +} +`, + }, + { + name: "with tab indent", + ct: content.ContentTypeJSON, + frame: ` { "foo" : {"bar": "baz"} } +`, + opts: []JSONYAMLOption{WithTabsIndent(1)}, + want: `{ + "foo": { + "bar": "baz" + } +} +`, + }, + { + name: "with malformed", + ct: content.ContentTypeJSON, + frame: `{"foo":"`, + opts: []JSONYAMLOption{WithCompactIndent()}, + checkErr: func(err error) bool { + _, ok := err.(*json.SyntaxError) + return ok + }, + }, + { + name: "only whitespace", + ct: content.ContentTypeJSON, + frame: ` + + `, + want: "", + }, + { + name: "no json", + ct: content.ContentTypeJSON, + frame: "", + want: "", + }, + { + name: "weird empty formatting", + ct: content.ContentTypeYAML, + frame: ` +--- + + + `, + want: "", + }, + { + name: "no yaml", + ct: content.ContentTypeYAML, + frame: "", + want: "", + }, + { + name: "too many frames", + ct: content.ContentTypeYAML, + frame: `aa: true +--- +bb: false +`, + wantErr: ErrTooManyFrames, + }, + { + name: "make sure lists are not expanded", + ct: content.ContentTypeYAML, + frame: `--- +kind: List +apiVersion: "v1" +items: +- name: 123 +- name: 456 +`, + want: `kind: List +apiVersion: "v1" +items: +- name: 123 +- name: 456 +`, + }, + { + name: "yaml format; don't be confused by the bar commend", + ct: content.ContentTypeYAML, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: + # bar +- name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: +# bar +- name: 123 +`, + }, + { + name: "detect indentation; don't be confused by the bar commend", + ct: content.ContentTypeYAML, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: +# bar + - name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: + # bar + - name: 123 +`, + }, + { + name: "force compact", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{WithCompactSeqIndent()}, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: + # bar + - name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: +# bar +- name: 123 +`, + }, + { + name: "force wide", + ct: content.ContentTypeYAML, + 
opts: []JSONYAMLOption{WithWideSeqIndent()}, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: +# bar +- name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: + # bar + - name: 123 +`, + }, + { + name: "invalid indentation", + ct: content.ContentTypeYAML, + frame: `--- + +kind: "foo" + bar: true`, + checkErr: func(err error) bool { + return err.Error() == "yaml: line 1: did not find expected key" + }, + }, + { + name: "infer seq style from prior; default is compact", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{}, + prior: `# root +# no lists here to look at + +kind: List # foo +# bla +apiVersion: v1 +`, + frame: `--- +kind: List +apiVersion: v1 +items: + - item1 # hello + - item2 +`, + want: `# root +# no lists here to look at + +kind: List # foo +# bla +apiVersion: v1 +items: +- item1 # hello +- item2 +`, + }, + { + name: "copy comments; infer seq style from prior", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{}, + prior: `# root +# hello + +kind: List # foo +# bla +apiVersion: v1 +notexist: foo # remember me! + +items: +# ignoreme + - item1 # hello + # bla + - item2 # hi + # after`, + frame: `--- +kind: List +apiVersion: v1 +fruits: +- fruit1 +items: +- item1 +- item2 +- item3 +`, + want: `# root +# hello +# Comments lost during file manipulation: +# Field "notexist": "remember me!" + +kind: List # foo +# bla +apiVersion: v1 +fruits: + - fruit1 +items: + # ignoreme + - item1 # hello + # bla + - item2 # hi + # after + + - item3 +`, + }, + { + name: "don't copy comments; infer from prior", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{WithNoCommentsCopy()}, + prior: `# root +# hello + +kind: List # foo +# bla +apiVersion: v1 +notexist: foo # remember me! + +items: +# ignoreme +- item1 # hello + # bla + - item2 # trying to trick the system; but it should make style choice based on item1 + # after`, + frame: `--- +kind: List +apiVersion: v1 +fruits: +- fruit1 # new +items: # new +- item1 +- item2 +# new +- item3 +`, + want: `kind: List +apiVersion: v1 +fruits: +- fruit1 # new +items: # new +- item1 +- item2 +# new +- item3 +`, + }, + { + name: "invalid prior", + ct: content.ContentTypeYAML, + prior: `# root +# hello + +kind: List # foo +# bla +apiVersion: v1 +notexist: foo # remember me! + +items: +# ignoreme + - item1 # hello + # bla +- item2 # trying to trick the system; but it should make style choice based on item1 + # after`, + frame: `--- +kind: List +apiVersion: v1 +fruits: +- fruit1 # new +items: # new +- item1 +- item2 +# new +- item3 +`, + checkErr: func(err error) bool { + return err.Error() == "yaml: line 3: did not find expected key" + }, + }, + { + name: "invalid copy comments; change from scalar to mapping node", + ct: content.ContentTypeYAML, + prior: `# root +foo: "bar" # baz`, + frame: ` +foo: + name: "bar" +`, + checkErr: func(err error) bool { + // from sigs.k8s.io/kustomize/kyaml/yaml/fns.go:728 + return err.Error() == `wrong Node Kind for expected: ScalarNode was MappingNode: value: {name: "bar"}` + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + s := NewJSONYAML(tt.opts...) 
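			// Each case builds a fresh sanitizer from its options; when a prior
			// frame is supplied, it is attached to the context below so comment
			// copying and indentation inference are exercised against it.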
+ if len(tt.prior) != 0 { + ctx = WithPriorData(ctx, []byte(tt.prior)) + } + got, err := s.Sanitize(ctx, tt.ct, []byte(tt.frame)) + assert.Equal(t, tt.want, string(got)) + if tt.checkErr != nil { + assert.True(t, tt.checkErr(err)) + } else { + assert.ErrorIs(t, err, tt.wantErr) + } + }) + } +} + +func TestIfSupported(t *testing.T) { + ctx := context.Background() + tests := []struct { + name string + s Sanitizer + ct content.ContentType + frame string + want string + wantErr bool + }{ + { + name: "nil sanitizer", + frame: "foo", + want: "foo", + }, + { + name: "unknown content type", + s: NewJSONYAML(), + ct: content.ContentType("unknown"), + frame: "foo", + want: "foo", + }, + { + name: "sanitize", + s: NewJSONYAML(WithCompactIndent()), + ct: content.ContentTypeJSON, + frame: ` { "foo" : true } `, + want: `{"foo":true} +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, _ := IfSupported(ctx, tt.s, tt.ct, []byte(tt.frame)) + assert.Equal(t, tt.want, string(got)) + }) + } +} From bf15542738c8164bbb293eeea201c1278f7a4630 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:19:02 +0300 Subject: [PATCH 07/19] Check in the metadata headers interface --- pkg/content/metadata/metadata.go | 148 ++++++++++++++++++++++++++ pkg/content/metadata/metadata_test.go | 49 +++++++++ 2 files changed, 197 insertions(+) create mode 100644 pkg/content/metadata/metadata.go create mode 100644 pkg/content/metadata/metadata_test.go diff --git a/pkg/content/metadata/metadata.go b/pkg/content/metadata/metadata.go new file mode 100644 index 00000000..ec565647 --- /dev/null +++ b/pkg/content/metadata/metadata.go @@ -0,0 +1,148 @@ +// Metadata contains an interface to work with HTTP-like headers carrying metadata about +// some content. +package metadata + +import ( + "mime" + "net/textproto" + "net/url" + "strconv" + "strings" +) + +/* + Metadata origin in the system by default: + + content.FromFile -> content.Reader + - X-Content-Location + - Content-Length + + content.FromBytes -> content.Reader + - Content-Length + + content.FromString -> content.Reader + - Content-Length + + content.ToFile -> content.Writer + - X-Content-Location + + content.ToBuffer -> content.Writer + + frame.NewYAMLReader -> frame.Reader + - Content-Type => YAML + + frame.NewJSONReader -> frame.Reader + - Content-Type => JSON + + frame.newRecognizingReader -> frame.Reader + - If Content-Type is set, use that ContentType + - If X-Content-Location is set, try deduce ContentType from that + - Peek the stream, and try to deduce the ContentType from that + +*/ + +const ( + // "Known" headers to the system by default, but any other header can also be attached + + XContentLocationKey = "X-Content-Location" + + ContentLengthKey = "Content-Length" + ContentTypeKey = "Content-Type" + AcceptKey = "Accept" + + // TODO: Add Content-Encoding and Last-Modified? +) + +type HeaderOption interface { + // TODO: Rename to ApplyMetadataHeader? 
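	// Illustrative usage (mirroring the metadata test below): options are applied
	// to any Header implementation, for example a textproto.MIMEHeader:
	//
	//	h := textproto.MIMEHeader{}
	//	WithContentLocation("/tmp/obj.yaml").ApplyToHeader(h)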
+ ApplyToHeader(target Header) +} + +var _ HeaderOption = setHeaderOption{} + +func SetOption(k, v string) HeaderOption { + return setHeaderOption{Key: k, Value: v} +} + +func WithContentLength(len int64) HeaderOption { + return SetOption(ContentLengthKey, strconv.FormatInt(len, 10)) +} + +func WithContentLocation(loc string) HeaderOption { + return SetOption(XContentLocationKey, loc) +} + +func WithAccept(accepts ...string) HeaderOption { + return addHeaderOption{Key: AcceptKey, Values: accepts} +} + +type setHeaderOption struct{ Key, Value string } + +func (o setHeaderOption) ApplyToHeader(target Header) { + target.Set(o.Key, o.Value) +} + +type addHeaderOption struct { + Key string + Values []string +} + +func (o addHeaderOption) ApplyToHeader(target Header) { + for _, val := range o.Values { + target.Add(o.Key, val) + } +} + +// Make sure the interface is compatible with the targeted textproto.MIMEHeader +var _ Header = textproto.MIMEHeader{} + +// Express the string-string map interface of the net/textproto.Header map +type Header interface { + Add(key, value string) + Set(key, value string) + Get(key string) string + Values(key string) []string + Del(key string) +} + +// TODO: Public or private? + +func GetString(m Header, key string) (string, bool) { + if len(m.Values(key)) == 0 { + return "", false + } + return m.Get(key), true +} + +func GetInt64(m Header, key string) (int64, bool) { + i, err := strconv.ParseInt(m.Get(key), 10, 64) + if err != nil { + return 0, false + } + return i, true +} + +func GetURL(m Header, key string) (*url.URL, bool) { + str, ok := GetString(m, key) + if !ok { + return nil, false + } + u, err := url.Parse(str) + if err != nil { + return nil, false + } + return u, true +} + +func GetMediaTypes(m Header, key string) (mediaTypes []string, err error) { + for _, commaSepVal := range m.Values(key) { + for _, mediaTypeStr := range strings.Split(commaSepVal, ",") { + mediaType, _, err := mime.ParseMediaType(mediaTypeStr) + if err != nil { + return nil, err + } + mediaTypes = append(mediaTypes, mediaType) + } + } + return +} diff --git a/pkg/content/metadata/metadata_test.go b/pkg/content/metadata/metadata_test.go new file mode 100644 index 00000000..ea094825 --- /dev/null +++ b/pkg/content/metadata/metadata_test.go @@ -0,0 +1,49 @@ +package metadata + +import ( + "net/textproto" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetMediaTypes(t *testing.T) { + tests := []struct { + name string + opts []HeaderOption + key string + wantMediaTypes []string + wantErr error + }{ + { + name: "multiple keys, and values in one key", + opts: []HeaderOption{ + WithAccept("application/yaml", "application/xml"), + WithAccept("application/json"), + WithAccept("text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8"), + }, + key: AcceptKey, + wantMediaTypes: []string{ + "application/yaml", + "application/xml", + "application/json", + "text/html", + "application/xhtml+xml", + "application/xml", + "image/webp", + "*/*", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := textproto.MIMEHeader{} + for _, opt := range tt.opts { + opt.ApplyToHeader(h) + } + gotMediaTypes, err := GetMediaTypes(h, tt.key) + assert.Equal(t, tt.wantMediaTypes, gotMediaTypes) + assert.ErrorIs(t, err, tt.wantErr) + }) + } +} From 60413fad70c71139aab057b3b0bdc67760d13440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:26:26 +0300 Subject: [PATCH 08/19] Implement traceable composite 
io.Readers that allow propagating metadata and contexts. --- pkg/content/constructors.go | 129 +++++++++++++ pkg/content/errors.go | 43 +++++ pkg/content/interfaces.go | 145 +++++++++++++++ pkg/content/metadata.go | 101 ++++++++++ pkg/content/reader.go | 244 +++++++++++++++++++++++++ pkg/content/reader_test.go | 62 +++++++ pkg/content/recognizing.go | 203 ++++++++++++++++++++ pkg/content/recognizing_reader_test.go | 96 ++++++++++ pkg/content/recognizing_test.go | 69 +++++++ pkg/content/segment_reader.go | 63 +++++++ pkg/content/tracing.go | 33 ++++ pkg/content/writer.go | 121 ++++++++++++ 12 files changed, 1309 insertions(+) create mode 100644 pkg/content/constructors.go create mode 100644 pkg/content/errors.go create mode 100644 pkg/content/interfaces.go create mode 100644 pkg/content/metadata.go create mode 100644 pkg/content/reader.go create mode 100644 pkg/content/reader_test.go create mode 100644 pkg/content/recognizing.go create mode 100644 pkg/content/recognizing_reader_test.go create mode 100644 pkg/content/recognizing_test.go create mode 100644 pkg/content/segment_reader.go create mode 100644 pkg/content/tracing.go create mode 100644 pkg/content/writer.go diff --git a/pkg/content/constructors.go b/pkg/content/constructors.go new file mode 100644 index 00000000..18df7e2a --- /dev/null +++ b/pkg/content/constructors.go @@ -0,0 +1,129 @@ +package content + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "testing/iotest" + + "github.com/weaveworks/libgitops/pkg/content/metadata" +) + +// newErrReader makes a Reader implementation that only returns the given error on Read() +func newErrReader(err error, opts ...metadata.HeaderOption) Reader { + return NewReader(iotest.ErrReader(err), opts...) +} + +const ( + stdinPath = "/dev/stdin" + stdoutPath = "/dev/stdout" + stderrPath = "/dev/stderr" +) + +func FromStdin(opts ...metadata.HeaderOption) Reader { + return FromFile(stdinPath, opts...) +} + +// FromFile returns an io.ReadCloser from the given file, or an io.ReadCloser which returns +// the given file open error when read. +func FromFile(filePath string, opts ...metadata.HeaderOption) Reader { + // Support stdin + if filePath == "-" || filePath == stdinPath { + // Mark the source as /dev/stdin + opts = append(opts, metadata.WithContentLocation(stdinPath)) + // TODO: Maybe have a way to override the TracerName through Metadata? + return NewReader(os.Stdin, opts...) + } + + // Report the file path in the X-Content-Location header + opts = append(opts, metadata.WithContentLocation(filePath)) + + // Open the file + f, err := os.Open(filePath) + if err != nil { + return newErrReader(err, opts...) + } + fi, err := f.Stat() + if err != nil { + return newErrReader(err, opts...) + } + + // Register the Content-Length header + opts = append(opts, metadata.WithContentLength(fi.Size())) + + return NewReader(f, opts...) +} + +// FromBytes returns an io.Reader from the given byte content. +func FromBytes(content []byte, opts ...metadata.HeaderOption) Reader { + // Register the Content-Length + opts = append(opts, metadata.WithContentLength(int64(len(content)))) + // Read from a *bytes.Reader + return NewReader(bytes.NewReader(content), opts...) +} + +// FromString returns an io.Reader from the given string content. +func FromString(content string, opts ...metadata.HeaderOption) Reader { + // Register the Content-Length + opts = append(opts, metadata.WithContentLength(int64(len(content)))) + // Read from a *strings.Reader + return NewReader(strings.NewReader(content), opts...) 
+} + +// TODO: FromHTTPResponse and ToHTTPResponse + +func ToStdout(opts ...metadata.HeaderOption) Writer { + return ToFile(stdoutPath, opts...) +} +func ToStderr(opts ...metadata.HeaderOption) Writer { + return ToFile(stderrPath, opts...) +} +func ToBuffer(buf *bytes.Buffer, opts ...metadata.HeaderOption) Writer { + return NewWriter(buf, opts...) +} + +func ToFile(filePath string, opts ...metadata.HeaderOption) Writer { + // Shorthands for pipe IO + if filePath == "-" || filePath == stdoutPath { + // Mark the target as /dev/stdout + opts = append(opts, metadata.WithContentLocation(stdoutPath)) + return NewWriter(os.Stdout, opts...) + } + if filePath == stderrPath { + // Mark the target as /dev/stderr + opts = append(opts, metadata.WithContentLocation(stderrPath)) + return NewWriter(os.Stderr, opts...) + } + + // Report the file path in the X-Content-Location header + opts = append(opts, metadata.WithContentLocation(filePath)) + + // Make sure all directories are created + if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil { + return newErrWriter(err, opts...) + } + + // Create or truncate the file + f, err := os.Create(filePath) + if err != nil { + return newErrWriter(err, opts...) + } + + // Register the Content-Length header + fi, err := f.Stat() + if err != nil { + return newErrWriter(err, opts...) + } + opts = append(opts, metadata.WithContentLength(fi.Size())) + + return NewWriter(f, opts...) +} + +func newErrWriter(err error, opts ...metadata.HeaderOption) Writer { + return NewWriter(&errWriter{err}, opts...) +} + +type errWriter struct{ err error } + +func (w *errWriter) Write([]byte) (int, error) { return 0, w.err } diff --git a/pkg/content/errors.go b/pkg/content/errors.go new file mode 100644 index 00000000..164e3c89 --- /dev/null +++ b/pkg/content/errors.go @@ -0,0 +1,43 @@ +package content + +import ( + "fmt" + + "github.com/weaveworks/libgitops/pkg/util/structerr" +) + +// Enforce all struct errors implementing structerr.StructError +var _ structerr.StructError = &UnsupportedContentTypeError{} + +// ErrUnsupportedContentType creates a new *UnsupportedContentTypeError +func ErrUnsupportedContentType(unsupported ContentType, supported ...ContentType) *UnsupportedContentTypeError { + return &UnsupportedContentTypeError{Unsupported: unsupported, Supported: supported} +} + +// UnsupportedContentTypeError describes that the supplied content type is not supported by an +// implementation handling different content types. +// +// This error can be checked for equality using errors.Is(err, &UnsupportedContentTypeError{}) +type UnsupportedContentTypeError struct { + // Unsupported is the content type that was given but not supported + // +required + Unsupported ContentType + // Supported is optional; if len(Supported) != 0, it lists the content types that indeed + // are supported by the implementation. If len(Supported) == 0, it should not be used + // as an indicator. + // +optional + Supported []ContentType +} + +func (e *UnsupportedContentTypeError) Error() string { + msg := fmt.Sprintf("unsupported content type: %q", e.Unsupported) + if len(e.Supported) != 0 { + msg = fmt.Sprintf("%s. 
supported content types: %v", msg, e.Supported) + } + return msg +} + +func (e *UnsupportedContentTypeError) Is(target error) bool { + _, ok := target.(*UnsupportedContentTypeError) + return ok +} diff --git a/pkg/content/interfaces.go b/pkg/content/interfaces.go new file mode 100644 index 00000000..a9d85409 --- /dev/null +++ b/pkg/content/interfaces.go @@ -0,0 +1,145 @@ +package content + +import ( + "context" + "fmt" + "io" + + "github.com/weaveworks/libgitops/pkg/content/metadata" +) + +var _ fmt.Stringer = ContentType("") + +// ContentType specifies the content type of some content. +// Ideally, a standard MIME notation like "application/json" shall be used. +type ContentType string + +const ( + ContentTypeYAML ContentType = "application/yaml" + ContentTypeJSON ContentType = "application/json" +) + +func (ct ContentType) ContentType() ContentType { return ct } +func (ct ContentType) String() string { return string(ct) } + +type ContentTypes []ContentType + +func (cts ContentTypes) Has(want ContentType) bool { + for _, ct := range cts { + if ct == want { + return true + } + } + return false +} + +func WithContentType(ct ContentType) metadata.HeaderOption { + return metadata.SetOption(metadata.ContentTypeKey, ct.String()) +} + +// ContentTyped is an interface that contains and/or supports one content type. +type ContentTyped interface { + ContentType() ContentType +} + +// ContentTypeSupporter supports potentially multiple content types. +type ContentTypeSupporter interface { + // Order _might_ carry a meaning + SupportedContentTypes() ContentTypes +} + +// underlying is the underlying stream of the Reader. +// If the returned io.Reader does not implement io.Closer, +// the underlying.Close() method will be re-used. +type WrapReaderFunc func(underlying io.ReadCloser) io.Reader + +type WrapWriterFunc func(underlying io.WriteCloser) io.Writer + +type WrapReaderToSegmentFunc func(underlying io.ReadCloser) RawSegmentReader + +// TODO: More documentation on these types. + +// Reader is a tracing-capable and metadata-bound io.Reader and io.Closer +// wrapper. It is NOT thread-safe by default. It supports introspection +// of composite ReadClosers. The TracerProvider from the given context +// is used. +// +// The Reader reads the current span from the given context, and uses that +// span's TracerProvider to create a Tracer and then also a new Span for +// the current operation. +type Reader interface { + // These call the underlying Set/ClearContext functions before/after + // reads and closes, and then uses the underlying io.ReadCloser. + // If the underlying Reader doesn't support closing, the returned + // Close method will only log a "CloseNoop" trace and exit with err == nil. + WithContext(ctx context.Context) io.ReadCloser + + // This reader supports registering metadata about the content it + // is reading. + MetadataContainer + + // Wrap returns a new Reader with io.ReadCloser B that reads from + // the current Reader's underlying io.ReadCloser A. If the returned + // B is an io.ReadCloser or this Reader's HasCloser() is true, + // HasCloser() of the returned Reader will be true, otherwise false. 
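	// Editorial sketch (not part of this patch): a typical Wrap call adds buffering
	// on top of the underlying stream. *bufio.Reader is not an io.Closer, so the
	// returned Reader falls back to closing this Reader's stream, as described above:
	//
	//	buffered := r.Wrap(func(underlying io.ReadCloser) io.Reader {
	//		return bufio.NewReader(underlying)
	//	})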
+ Wrap(fn WrapReaderFunc) Reader + WrapSegment(fn WrapReaderToSegmentFunc) SegmentReader +} + +type RawSegmentReader interface { + Read() ([]byte, error) +} + +type ClosableRawSegmentReader interface { + RawSegmentReader + io.Closer +} + +type SegmentReader interface { + WithContext(ctx context.Context) ClosableRawSegmentReader + + MetadataContainer +} + +// In the future, one can implement a WrapSegment function that is of +// the following form: +// WrapSegment(name string, fn WrapSegmentFunc) SegmentReader +// where WrapSegmentFunc is func(underlying ClosableRawSegmentReader) RawSegmentReader +// This allows chaining simple composite SegmentReaders + +type Writer interface { + WithContext(ctx context.Context) io.WriteCloser + + // This writer supports registering metadata about the content it + // is writing and the destination it is writing to. + MetadataContainer + + Wrap(fn WrapWriterFunc) Writer +} + +type readerInternal interface { + Reader + RawReader() io.Reader + RawCloser() io.Closer +} + +type segmentReaderInternal interface { + SegmentReader + RawSegmentReader() RawSegmentReader + RawCloser() io.Closer +} + +type writerInternal interface { + Writer + RawWriter() io.Writer + RawCloser() io.Closer +} + +// The internal implementation structs should implement the +// ...Internal interfaces, in order to expose their raw, underlying resources +// just in case it is _really_ needed upstream (e.g. for testing). It is not +// exposed by default in the interface to avoid showing up in Godoc, as it +// most often shouldn't be used. +var _ readerInternal = &reader{} +var _ segmentReaderInternal = &segmentReader{} +var _ writerInternal = &writer{} diff --git a/pkg/content/metadata.go b/pkg/content/metadata.go new file mode 100644 index 00000000..a17f3f16 --- /dev/null +++ b/pkg/content/metadata.go @@ -0,0 +1,101 @@ +package content + +import ( + "encoding/json" + "net/textproto" + "net/url" + + "github.com/weaveworks/libgitops/pkg/content/metadata" +) + +// Metadata is the interface that's common to contentMetadataOptions and a wrapper +// around a HTTP request. +type Metadata interface { + metadata.Header + metadata.HeaderOption + + // Apply applies the given Options to itself and returns itself, without + // any deep-copying. + Apply(opts ...metadata.HeaderOption) Metadata + // ContentLength retrieves the standard "Content-Length" header + ContentLength() (int64, bool) + // ContentType retrieves the standard "Content-Type" header + ContentType() (ContentType, bool) + // ContentLocation retrieves the custom "X-Content-Location" header + ContentLocation() (*url.URL, bool) + + // Clone makes a deep copy of the Metadata + // TODO: Do we need this anymore? 
+ Clone() Metadata + + ToContainer() MetadataContainer +} + +var _ Metadata = contentMetadata{} + +var _ json.Marshaler = contentMetadata{} + +func (m contentMetadata) MarshalJSON() ([]byte, error) { + return json.Marshal(m.MIMEHeader) +} + +func (m contentMetadata) ApplyToHeader(target metadata.Header) { + for k, vals := range m.MIMEHeader { + for i, val := range vals { + if i == 0 { + target.Set(k, val) + } else { + target.Add(k, val) + } + } + } +} + +func (m contentMetadata) Apply(opts ...metadata.HeaderOption) Metadata { + for _, opt := range opts { + opt.ApplyToHeader(m) + } + return m +} + +func (m contentMetadata) ContentLength() (int64, bool) { + return metadata.GetInt64(m, metadata.ContentLengthKey) +} + +func (m contentMetadata) ContentType() (ContentType, bool) { + ct, ok := metadata.GetString(m, metadata.ContentTypeKey) + return ContentType(ct), ok +} + +func (m contentMetadata) ContentLocation() (*url.URL, bool) { + return metadata.GetURL(m, metadata.XContentLocationKey) +} + +func (m contentMetadata) ToContainer() MetadataContainer { + return &metadataContainer{m} +} + +func (m contentMetadata) Clone() Metadata { + m2 := make(textproto.MIMEHeader, len(m.MIMEHeader)) + for k, v := range m.MIMEHeader { + m2[k] = v + } + return contentMetadata{m2} +} + +type MetadataContainer interface { + // ContentMetadata + ContentMetadata() Metadata +} + +func NewMetadata(opts ...metadata.HeaderOption) Metadata { + return contentMetadata{MIMEHeader: textproto.MIMEHeader{}}.Apply(opts...) +} + +type contentMetadata struct { + textproto.MIMEHeader +} + +type metadataContainer struct{ m Metadata } + +func (b *metadataContainer) ContentMetadata() Metadata { return b.m } diff --git a/pkg/content/reader.go b/pkg/content/reader.go new file mode 100644 index 00000000..e417096b --- /dev/null +++ b/pkg/content/reader.go @@ -0,0 +1,244 @@ +package content + +import ( + "context" + "errors" + "io" + "os" + + "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" +) + +type contextLock interface { + setContext(ctx context.Context) + clearContext() +} + +type contextLockImpl struct { + ctx context.Context +} + +func (l *contextLockImpl) setContext(ctx context.Context) { l.ctx = ctx } +func (l *contextLockImpl) clearContext() { l.ctx = nil } + +type readContextLockImpl struct { + contextLockImpl + r io.Reader + metaGetter MetadataContainer + underlyingLock contextLock +} + +func (r *readContextLockImpl) Read(p []byte) (n int, err error) { + ft := tracing.FromContext(r.ctx, r.r) + err = ft.TraceFunc(r.ctx, "Read", func(ctx context.Context, span trace.Span) error { + var tmperr error + if r.underlyingLock != nil { + r.underlyingLock.setContext(ctx) + } + n, tmperr = r.r.Read(p) + if r.underlyingLock != nil { + r.underlyingLock.clearContext() + } + // Register metadata in the span + span.SetAttributes(SpanAttrByteContentCap(p[:n], len(p))...) 
+ return tmperr + }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).RegisterCustom(SpanRegisterReadError) + return +} + +type closeContextLockImpl struct { + contextLockImpl + c io.Closer + metaGetter MetadataContainer + underlyingLock contextLock +} + +func (c *closeContextLockImpl) Close() error { + spanName := "Close" + if c.c == nil { + spanName = "CloseNoop" + } + + ft := tracing.FromContext(c.ctx, c.c) + return ft.TraceFunc(c.ctx, spanName, func(ctx context.Context, _ trace.Span) error { + // Don't close if c.c is nil + if c.c == nil { + return nil + } + + if c.underlyingLock != nil { + c.underlyingLock.setContext(ctx) + } + // Close the underlying resource + err := c.c.Close() + if c.underlyingLock != nil { + c.underlyingLock.clearContext() + } + return err + }, trace.WithAttributes(SpanAttrContentMetadata(c.metaGetter.ContentMetadata()))).Register() +} + +type reader struct { + MetadataContainer + read *readContextLockImpl + close *closeContextLockImpl +} + +type readerWithContext struct { + read *readContextLockImpl + ctx context.Context +} + +func (r *readerWithContext) Read(p []byte) (n int, err error) { + r.read.setContext(r.ctx) + n, err = r.read.Read(p) + r.read.clearContext() + return +} + +type closerWithContext struct { + close *closeContextLockImpl + ctx context.Context +} + +func (r *closerWithContext) Close() error { + r.close.setContext(r.ctx) + err := r.close.Close() + r.close.clearContext() + return err +} + +func (r *reader) WithContext(ctx context.Context) io.ReadCloser { + return compositeio.ReadCloser(&readerWithContext{r.read, ctx}, &closerWithContext{r.close, ctx}) +} +func (r *reader) RawReader() io.Reader { return r.read.r } +func (r *reader) RawCloser() io.Closer { return r.close.c } + +// Maybe allow adding extra attributes at the end? +func (r *reader) Wrap(wrapFn WrapReaderFunc) Reader { + newReader := wrapFn(compositeio.ReadCloser(r.read, r.close)) + if newReader == nil { + panic("newReader must not be nil") + } + // If an io.Closer is not returned, close this + // Reader's stream instead. Importantly enough, + // a trace will be registered for both this + // Reader, and the returned one. + newCloser, ok := newReader.(io.Closer) + if !ok { + newCloser = r.close + } + + mb := r.ContentMetadata().Clone().ToContainer() + + return &reader{ + MetadataContainer: mb, + read: &readContextLockImpl{ + r: newReader, + metaGetter: mb, + underlyingLock: r.read, + }, + close: &closeContextLockImpl{ + c: newCloser, + metaGetter: mb, + underlyingLock: r.close, + }, + } +} + +func (r *reader) WrapSegment(wrapFn WrapReaderToSegmentFunc) SegmentReader { + newSegmentReader := wrapFn(compositeio.ReadCloser(r.read, r.close)) + if newSegmentReader == nil { + panic("newSegmentReader must not be nil") + } + + // If an io.Closer is not returned, close this + // Reader's stream instead. Importantly enough, + // a trace will be registered for both this + // Reader, and the returned one. 
+ newCloser, ok := newSegmentReader.(io.Closer) + if !ok { + newCloser = r.close + } + + mb := r.ContentMetadata().Clone().ToContainer() + + return &segmentReader{ + MetadataContainer: mb, + read: &readSegmentContextLockImpl{ + r: newSegmentReader, + metaGetter: mb, + underlyingLock: r.read, + }, + close: &closeContextLockImpl{ + c: newCloser, + metaGetter: mb, + underlyingLock: r.close, + }, + } +} + +func NewReader(r io.Reader, opts ...metadata.HeaderOption) Reader { + // If it already is a Reader, just return it + rr, ok := r.(Reader) + if ok { + return rr + } + + // Use the closer if available + c, _ := r.(io.Closer) + // Never close stdio + if isStdio(r) { + c = nil + } + mb := NewMetadata(opts...).ToContainer() + + return &reader{ + MetadataContainer: mb, + read: &readContextLockImpl{ + r: r, + metaGetter: mb, + // underlyingLock is nil + }, + close: &closeContextLockImpl{ + c: c, + metaGetter: mb, + // underlyingLock is nil + }, + } +} + +func isStdio(s interface{}) bool { + f, ok := s.(*os.File) + if !ok { + return false + } + return int(f.Fd()) < 3 +} + +// SpanRegisterReadError registers io.EOF as an "event", and other errors as "unknown errors" in the trace +func SpanRegisterReadError(span trace.Span, err error) { + // Register the error with the span. EOF is expected at some point, + // hence, register that as an event instead of an error + if errors.Is(err, io.EOF) { + span.AddEvent("EOF") + } else if err != nil { + span.RecordError(err) + } +} + +type ResetCounterFunc func() + +func WrapLimited(r Reader, maxFrameSize limitedio.Limit) (Reader, ResetCounterFunc) { + var reset ResetCounterFunc + limitedR := r.Wrap(func(underlying io.ReadCloser) io.Reader { + lr := limitedio.NewReader(underlying, maxFrameSize) + reset = lr.ResetCounter + return lr + }) + return limitedR, reset +} diff --git a/pkg/content/reader_test.go b/pkg/content/reader_test.go new file mode 100644 index 00000000..98b6aea3 --- /dev/null +++ b/pkg/content/reader_test.go @@ -0,0 +1,62 @@ +package content + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_isStdio(t *testing.T) { + tmp := t.TempDir() + f, err := os.Create(filepath.Join(tmp, "foo.txt")) + require.Nil(t, err) + defer f.Close() + tests := []struct { + name string + in interface{} + want bool + }{ + { + name: "os.Stdin", + in: os.Stdin, + want: true, + }, + { + name: "os.Stdout", + in: os.Stdout, + want: true, + }, + { + name: "os.Stderr", + in: os.Stderr, + want: true, + }, + { + name: "*bytes.Buffer", + in: bytes.NewBufferString("FooBar"), + }, + { + name: "*strings.Reader", + in: strings.NewReader("FooBar"), + }, + { + name: "*strings.Reader", + in: strings.NewReader("FooBar"), + }, + { + name: "*os.File", + in: f, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isStdio(tt.in) + assert.Equal(t, got, tt.want) + }) + } +} diff --git a/pkg/content/recognizing.go b/pkg/content/recognizing.go new file mode 100644 index 00000000..ed3d198b --- /dev/null +++ b/pkg/content/recognizing.go @@ -0,0 +1,203 @@ +package content + +import ( + "bufio" + "bytes" + "context" + "errors" + "io" + "path/filepath" + + "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "go.opentelemetry.io/otel/trace" + yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "sigs.k8s.io/yaml" +) + +const peekSize = 2048 + +type 
ContentTypeRecognizer interface { + FromContentMetadata(m Metadata) (ct ContentType, ok bool) + FromPeekBytes(peek []byte) (ct ContentType, ok bool) + + // SupportedContentTypes() tells about what ContentTypes are supported by this recognizer + ContentTypeSupporter +} + +func NewJSONYAMLRecognizingReader(ctx context.Context, r Reader) (Reader, ContentType, error) { + return NewRecognizingReader(ctx, r, NewJSONYAMLContentTypeRecognizer()) +} + +func NewRecognizingReader(ctx context.Context, r Reader, ctrec ContentTypeRecognizer) (Reader, ContentType, error) { + // If r already has Content-Type set, all good + meta := r.ContentMetadata() + ct, ok := meta.ContentType() + if ok { + return r, ct, nil + } + + // Try to resolve the Content-Type from the X-Content-Location header + ct, ok = ctrec.FromContentMetadata(meta) + if ok { + meta.Apply(WithContentType(ct)) + return r, ct, nil + } + + var newr Reader + err := tracing.FromContext(ctx, "content").TraceFunc(ctx, "NewRecognizingReader", + func(ctx context.Context, span trace.Span) error { + + // Use the context to access the io.ReadCloser + rc := r.WithContext(ctx) + meta := r.ContentMetadata().Clone() + + bufr := bufio.NewReaderSize(rc, peekSize) + + peek, err := bufr.Peek(peekSize) + if err != nil && !errors.Is(err, io.EOF) { + return err + } + + // Write to ct defined earlier, that value will be returned if err == nil + ct, ok = ctrec.FromPeekBytes(peek) + if !ok { + // TODO: Struct error; include the peek in the context too + return errors.New("couldn't recognize content type") + } + + // Set the right recognized content type + meta.Apply(WithContentType(ct)) + + // Read from the buffered bufio.Reader, because we have already peeked + // data from the underlying rc. Close rc when done. + newr = NewReader(compositeio.ReadCloser(bufr, rc), meta) + return nil + }).Register() + if err != nil { + return nil, "", err + } + + return newr, ct, nil +} + +func NewRecognizingWriter(w Writer, ctrec ContentTypeRecognizer) (Writer, ContentType, error) { + // If r already has Content-Type set, all good + meta := w.ContentMetadata() + ct, ok := meta.ContentType() + if ok { + return w, ct, nil + } + + // Try to resolve the Content-Type from the X-Content-Location header + ct, ok = ctrec.FromContentMetadata(meta) + if ok { + meta.Apply(WithContentType(ct)) + return w, ct, nil + } + + // Negotiate the Accept header + ct, ok = negotiateAccept(meta, ctrec.SupportedContentTypes()) + if ok { + meta.Apply(WithContentType(ct)) + return w, ct, nil + } + + return nil, "", errors.New("couldn't recognize content type") +} + +const acceptAll ContentType = "*/*" + +func negotiateAccept(meta Metadata, supportedTypes []ContentType) (ContentType, bool) { + accepts, err := metadata.GetMediaTypes(meta, metadata.AcceptKey) + if err != nil { + return "", false + } + + // prioritize the order that the metadata is asking for. 
supported is in priority order too + for _, accept := range accepts { + for _, supported := range supportedTypes { + if matchesAccept(ContentType(accept), supported) { + return supported, true + } + } + } + return "", false +} + +func matchesAccept(accept, supported ContentType) bool { + if accept == acceptAll { + return true + } + return accept == supported +} + +func NewJSONYAMLContentTypeRecognizer() ContentTypeRecognizer { + return jsonYAMLContentTypeRecognizer{} +} + +type jsonYAMLContentTypeRecognizer struct { +} + +var defaultExtMap = map[string]ContentType{ + ".json": ContentTypeJSON, + ".yml": ContentTypeYAML, + ".yaml": ContentTypeYAML, +} + +func (jsonYAMLContentTypeRecognizer) FromContentMetadata(m Metadata) (ContentType, bool) { + loc, ok := metadata.GetString(m, metadata.XContentLocationKey) + if !ok { + return "", false + } + ext := filepath.Ext(loc) + ct, ok := defaultExtMap[ext] + if !ok { + return "", false + } + return ct, true +} + +func (jsonYAMLContentTypeRecognizer) FromPeekBytes(peek []byte) (ContentType, bool) { + // Check if this is JSON or YAML + if yamlutil.IsJSONBuffer(peek) { + return ContentTypeJSON, true + } else if isYAML(peek) { + return ContentTypeYAML, true + } + return "", false +} + +func (jsonYAMLContentTypeRecognizer) SupportedContentTypes() ContentTypes { + return []ContentType{ContentTypeJSON, ContentTypeYAML} +} + +func isYAML(peek []byte) bool { + line, err := getLine(peek) + if err != nil { + return false + } + + o := map[string]interface{}{} + err = yaml.Unmarshal(line, &o) + return err == nil +} + +func getLine(peek []byte) ([]byte, error) { + s := bufio.NewScanner(bytes.NewReader(peek)) + // TODO: Support very long lines? (over 65k bytes?) Probably not + for s.Scan() { + t := bytes.TrimSpace(s.Bytes()) + // TODO: Ignore comments + if len(t) == 0 || bytes.Equal(t, []byte("---")) { + continue + } + return t, nil + } + // Return a possible scanning error + if err := s.Err(); err != nil { + return nil, err + } + return nil, errors.New("couldn't find non-empty line in scanner") +} diff --git a/pkg/content/recognizing_reader_test.go b/pkg/content/recognizing_reader_test.go new file mode 100644 index 00000000..804f237a --- /dev/null +++ b/pkg/content/recognizing_reader_test.go @@ -0,0 +1,96 @@ +package content + +import ( + "bufio" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_isYAML(t *testing.T) { + tests := []struct { + name string + peek string + want bool + }{ + { + name: "field mapping", + peek: "foo: bar\n", + want: true, + }, + { + name: "spaces and other empty documents", + peek: `--- + + +--- +--- +foo: bar`, + want: true, + }, + { + name: "bool", + peek: "foo: true", + want: true, + }, + { + name: "int", + peek: "foo: 5", + want: true, + }, + { + name: "float", + peek: "foo: 5.1", + want: true, + }, + { + name: "float", + peek: "foo: null", + want: true, + }, + { + name: "beginning of struct", + peek: "foo:", + want: true, + }, + { + name: "scalar null", + peek: `null`, + want: true, + }, + { + name: "nothing", + }, + { + name: "line overflow", + peek: strings.Repeat("a", bufio.MaxScanTokenSize) + ": true", + }, + + { + name: "list element struct", + peek: "- foo: bar", + }, + { + name: "list element string", + peek: "- foo", + }, + { + name: "scalar string", + peek: `foo`, + }, + { + name: "scalar int", + peek: `5`, + }, + { + name: "scalar float", + peek: `5.1`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, isYAML([]byte(tt.peek)), tt.want) + }) + } +} 
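As an editorial aside (not part of the patch), the recognizer above composes with the content.Reader constructors from the previous commit; a minimal sketch, where the file name "cfg.yaml" is a hypothetical placeholder:

	package main

	import (
		"context"
		"fmt"

		"github.com/weaveworks/libgitops/pkg/content"
	)

	func main() {
		// FromFile records the path in X-Content-Location, so the ".yaml" extension
		// alone is enough for the recognizer; otherwise the first bytes are peeked.
		r, ct, err := content.NewJSONYAMLRecognizingReader(context.Background(), content.FromFile("cfg.yaml"))
		if err != nil {
			fmt.Println(err) // neither metadata nor peeked bytes were recognizable
			return
		}
		fmt.Println(ct) // "application/yaml"
		_ = r           // read as usual; any peeked bytes are replayed first
	}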
diff --git a/pkg/content/recognizing_test.go b/pkg/content/recognizing_test.go new file mode 100644 index 00000000..0350a6c7 --- /dev/null +++ b/pkg/content/recognizing_test.go @@ -0,0 +1,69 @@ +package content + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaveworks/libgitops/pkg/content/metadata" +) + +func Test_negotiateAccept(t *testing.T) { + tests := []struct { + name string + accepts []string + supported []ContentType + want ContentType + wantOk bool + }{ + { + name: "accepts has higher priority than supported", + // application/bar is not supported, but the second highest priority does + accepts: []string{"application/bar", "application/json", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/json", + wantOk: true, + }, + { + name: "no accepts should give empty result", + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + }, + { + name: "no supported should give empty result", + accepts: []string{"application/bar", "application/json", "application/yaml"}, + }, + { + name: "invalid accept should give empty result", + accepts: []string{"///;;app/bar", "application/json", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + }, + { + name: "ignore extra parameters, e.g. q=0.8", + accepts: []string{"application/bar", "application/json;q=0.8", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/json", + wantOk: true, + }, + { + name: "allow comma separation", + accepts: []string{"application/bar, application/json;q=0.8", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/json", + wantOk: true, + }, + { + name: "accept all; choose the preferred one", + accepts: []string{"application/bar, */*;q=0.7", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/foo", + wantOk: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := NewMetadata(metadata.WithAccept(tt.accepts...)) + got, gotOk := negotiateAccept(m, tt.supported) + assert.Equal(t, tt.want, got) + assert.Equal(t, tt.wantOk, gotOk) + }) + } +} diff --git a/pkg/content/segment_reader.go b/pkg/content/segment_reader.go new file mode 100644 index 00000000..62f408ce --- /dev/null +++ b/pkg/content/segment_reader.go @@ -0,0 +1,63 @@ +package content + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/tracing" + "go.opentelemetry.io/otel/trace" +) + +type segmentReader struct { + MetadataContainer + read *readSegmentContextLockImpl + close *closeContextLockImpl +} + +func (r *segmentReader) WithContext(ctx context.Context) ClosableRawSegmentReader { + return closableRawSegmentReader{&segmentReaderWithContext{r.read, ctx}, &closerWithContext{r.close, ctx}} +} + +func (r *segmentReader) RawSegmentReader() RawSegmentReader { return r.read.r } +func (r *segmentReader) RawCloser() io.Closer { return r.close.c } + +type segmentReaderWithContext struct { + read *readSegmentContextLockImpl + ctx context.Context +} + +func (r *segmentReaderWithContext) Read() (content []byte, err error) { + r.read.setContext(r.ctx) + content, err = r.read.Read() + r.read.clearContext() + return +} + +type readSegmentContextLockImpl struct { + contextLockImpl + r RawSegmentReader + 
metaGetter MetadataContainer + underlyingLock contextLock +} + +func (r *readSegmentContextLockImpl) Read() (content []byte, err error) { + ft := tracing.FromContext(r.ctx, r.r) + err = ft.TraceFunc(r.ctx, "ReadSegment", func(ctx context.Context, span trace.Span) error { + var tmperr error + if r.underlyingLock != nil { + r.underlyingLock.setContext(ctx) + } + content, tmperr = r.r.Read() + if r.underlyingLock != nil { + r.underlyingLock.clearContext() + } + span.SetAttributes(SpanAttrByteContent(content)...) + return tmperr + }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).RegisterCustom(SpanRegisterReadError) + return +} + +type closableRawSegmentReader struct { + RawSegmentReader + io.Closer +} diff --git a/pkg/content/tracing.go b/pkg/content/tracing.go new file mode 100644 index 00000000..f11eec83 --- /dev/null +++ b/pkg/content/tracing.go @@ -0,0 +1,33 @@ +package content + +import "go.opentelemetry.io/otel/attribute" + +const ( + SpanAttributeKeyByteContent = "byteContent" + SpanAttributeKeyByteContentLen = "byteContentLength" + SpanAttributeKeyByteContentCap = "byteContentCapacity" + SpanAttributeKeyContentMetadata = "contentMetadata" +) + +// SpanAttrByteContent registers byteContent and byteContentLength span attributes +// b should be the byte content that has been e.g. read or written in an io operation +func SpanAttrByteContent(b []byte) []attribute.KeyValue { + return []attribute.KeyValue{ + attribute.String(SpanAttributeKeyByteContent, string(b)), + attribute.Int64(SpanAttributeKeyByteContentLen, int64(len(b))), + } +} + +// SpanAttrByteContentCap extends SpanAttrByteContent with a capacity argument +// cap should be the capacity of e.g. that read or write, i.e. how much +// could have been read or written. +func SpanAttrByteContentCap(b []byte, cap int) []attribute.KeyValue { + return append(SpanAttrByteContent(b), + attribute.Int(SpanAttributeKeyByteContentCap, cap), + ) +} + +// TODO: This should be used upstream, too, or not? 
+func SpanAttrContentMetadata(m Metadata) attribute.KeyValue { + return attribute.Any(SpanAttributeKeyContentMetadata, m) +} diff --git a/pkg/content/writer.go b/pkg/content/writer.go new file mode 100644 index 00000000..167346ae --- /dev/null +++ b/pkg/content/writer.go @@ -0,0 +1,121 @@ +package content + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "go.opentelemetry.io/otel/trace" +) + +func NewWriter(w io.Writer, opts ...metadata.HeaderOption) Writer { + // If it already is a Writer, just return it + ww, ok := w.(Writer) + if ok { + return ww + } + + // Use the closer if available + c, _ := w.(io.Closer) + // Never close stdio + if isStdio(w) { + c = nil + } + mb := NewMetadata(opts...).ToContainer() + + return &writer{ + MetadataContainer: mb, + write: &writeContextLockImpl{ + w: w, + metaGetter: mb, + // underlyingLock is nil + }, + close: &closeContextLockImpl{ + c: c, + metaGetter: mb, + // underlyingLock is nil + }, + } +} + +type writer struct { + MetadataContainer + write *writeContextLockImpl + close *closeContextLockImpl +} + +func (w *writer) WithContext(ctx context.Context) io.WriteCloser { + return compositeio.WriteCloser(&writerWithContext{w.write, ctx}, &closerWithContext{w.close, ctx}) +} +func (w *writer) RawWriter() io.Writer { return w.write.w } +func (w *writer) RawCloser() io.Closer { return w.close.c } + +func (w *writer) Wrap(wrapFn WrapWriterFunc) Writer { + newWriter := wrapFn(compositeio.WriteCloser(w.write, w.close)) + if newWriter == nil { + panic("newWriter must not be nil") + } + // If an io.Closer is not returned, close this + // Reader's stream instead. Importantly enough, + // a trace will be registered for both this + // Reader, and the returned one. + newCloser, ok := newWriter.(io.Closer) + if !ok { + newCloser = w.close + } + + mb := w.ContentMetadata().Clone().ToContainer() + + return &writer{ + MetadataContainer: mb, + write: &writeContextLockImpl{ + w: newWriter, + metaGetter: mb, + underlyingLock: w.write, + }, + close: &closeContextLockImpl{ + c: newCloser, + metaGetter: mb, + underlyingLock: w.close, + }, + } +} + +type writerWithContext struct { + write *writeContextLockImpl + ctx context.Context +} + +func (w *writerWithContext) Write(p []byte) (n int, err error) { + w.write.setContext(w.ctx) + n, err = w.write.Write(p) + w.write.clearContext() + return +} + +type writeContextLockImpl struct { + contextLockImpl + w io.Writer + metaGetter MetadataContainer + underlyingLock contextLock +} + +func (r *writeContextLockImpl) Write(p []byte) (n int, err error) { + ft := tracing.FromContext(r.ctx, r.w) + err = ft.TraceFunc(r.ctx, "Write", func(ctx context.Context, span trace.Span) error { + var tmperr error + if r.underlyingLock != nil { + r.underlyingLock.setContext(ctx) + } + n, tmperr = r.w.Write(p) + if r.underlyingLock != nil { + r.underlyingLock.clearContext() + } + // Register metadata in the span + span.SetAttributes(SpanAttrByteContentCap(p[:n], len(p))...) 
+ return tmperr + }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).Register() + return +} From 570488a8eac59dd24324ba00df2a6ed78f1ea094 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:33:54 +0300 Subject: [PATCH 09/19] Implement the new framing library --- pkg/frame/constructors.go | 104 ++++++ pkg/frame/errors.go | 38 ++ pkg/frame/interfaces.go | 165 +++++++++ pkg/frame/k8s_reader_streaming.go | 110 ++++++ pkg/frame/k8s_reader_yaml.go | 130 +++++++ pkg/frame/options.go | 144 ++++++++ pkg/frame/options_boilerplate.go | 114 ++++++ pkg/frame/reader.go | 113 ++++++ pkg/frame/reader_factory.go | 76 ++++ pkg/frame/reader_single.go | 48 +++ pkg/frame/reader_streaming.go | 115 ++++++ pkg/frame/reader_test.go | 528 ++++++++++++++++++++++++++++ pkg/frame/utils.go | 78 ++++ pkg/frame/utils_test.go | 119 +++++++ pkg/frame/writer.go | 76 ++++ pkg/frame/writer_delegate.go | 58 +++ pkg/frame/writer_factory.go | 50 +++ pkg/frame/writer_test.go | 34 ++ pkg/serializer/error_structs.go | 52 --- pkg/serializer/frame_reader.go | 168 --------- pkg/serializer/frame_reader_test.go | 114 ------ pkg/serializer/frame_utils.go | 37 -- pkg/serializer/frame_writer.go | 128 ------- pkg/serializer/frame_writer_test.go | 66 ---- 24 files changed, 2100 insertions(+), 565 deletions(-) create mode 100644 pkg/frame/constructors.go create mode 100644 pkg/frame/errors.go create mode 100644 pkg/frame/interfaces.go create mode 100644 pkg/frame/k8s_reader_streaming.go create mode 100644 pkg/frame/k8s_reader_yaml.go create mode 100644 pkg/frame/options.go create mode 100644 pkg/frame/options_boilerplate.go create mode 100644 pkg/frame/reader.go create mode 100644 pkg/frame/reader_factory.go create mode 100644 pkg/frame/reader_single.go create mode 100644 pkg/frame/reader_streaming.go create mode 100644 pkg/frame/reader_test.go create mode 100644 pkg/frame/utils.go create mode 100644 pkg/frame/utils_test.go create mode 100644 pkg/frame/writer.go create mode 100644 pkg/frame/writer_delegate.go create mode 100644 pkg/frame/writer_factory.go create mode 100644 pkg/frame/writer_test.go delete mode 100644 pkg/serializer/error_structs.go delete mode 100644 pkg/serializer/frame_reader.go delete mode 100644 pkg/serializer/frame_reader_test.go delete mode 100644 pkg/serializer/frame_utils.go delete mode 100644 pkg/serializer/frame_writer.go delete mode 100644 pkg/serializer/frame_writer_test.go diff --git a/pkg/frame/constructors.go b/pkg/frame/constructors.go new file mode 100644 index 00000000..6e8ebe49 --- /dev/null +++ b/pkg/frame/constructors.go @@ -0,0 +1,104 @@ +package frame + +import ( + "bytes" + "context" + + "github.com/weaveworks/libgitops/pkg/content" +) + +// 2 generic Reader constructors + +func NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader { + return internalFactoryVar.NewSingleReader(ct, r, opts...) +} + +func NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader { + return internalFactoryVar.NewRecognizingReader(ctx, r, opts...) +} + +// 4 JSON-YAML Reader constructors using the default factory + +func NewYAMLReader(r content.Reader, opts ...ReaderOption) Reader { + return internalFactoryVar.NewReader(content.ContentTypeYAML, r, opts...) +} + +func NewJSONReader(r content.Reader, opts ...ReaderOption) Reader { + return internalFactoryVar.NewReader(content.ContentTypeJSON, r, opts...) 
+} + +func NewSingleYAMLReader(r content.Reader, opts ...SingleReaderOption) Reader { + return NewSingleReader(content.ContentTypeYAML, r, opts...) +} + +func NewSingleJSONReader(r content.Reader, opts ...SingleReaderOption) Reader { + return NewSingleReader(content.ContentTypeJSON, r, opts...) +} + +// 2 generic Writer constructors + +func NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(ct, w, opts...) +} + +func NewRecognizingWriter(r content.Writer, opts ...RecognizingWriterOption) Writer { + return internalFactoryVar.NewRecognizingWriter(r, opts...) +} + +// 4 JSON-YAML Writer constructors using the default factory + +func NewYAMLWriter(r content.Writer, opts ...WriterOption) Writer { + return internalFactoryVar.NewWriter(content.ContentTypeYAML, r, opts...) +} + +func NewJSONWriter(r content.Writer, opts ...WriterOption) Writer { + return internalFactoryVar.NewWriter(content.ContentTypeJSON, r, opts...) +} + +func NewSingleYAMLWriter(r content.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(content.ContentTypeYAML, r, opts...) +} + +func NewSingleJSONWriter(r content.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(content.ContentTypeJSON, r, opts...) +} + +// 1 single, 3 YAML and 1 recognizing content.Reader helper constructors + +/*func FromSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleReaderOption) Reader { + return NewSingleReader(ct, content.FromBuffer(buf), opts...) +}*/ + +func FromYAMLBytes(yamlBytes []byte, opts ...ReaderOption) Reader { + return NewYAMLReader(content.FromBytes(yamlBytes), opts...) +} + +func FromYAMLString(yamlStr string, opts ...ReaderOption) Reader { + return NewYAMLReader(content.FromString(yamlStr), opts...) +} + +func FromYAMLFile(filePath string, opts ...ReaderOption) Reader { + return NewYAMLReader(content.FromFile(filePath), opts...) +} + +func FromFile(ctx context.Context, filePath string, opts ...RecognizingReaderOption) Reader { + return NewRecognizingReader(ctx, content.FromFile(filePath), opts...) +} + +// 1 single, 2 YAML and 1 recognizing content.Writer helper constructors + +func ToSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleWriterOption) Writer { + return NewSingleWriter(ct, content.ToBuffer(buf), opts...) +} + +func ToYAMLBuffer(buf *bytes.Buffer, opts ...WriterOption) Writer { + return NewYAMLWriter(content.NewWriter(buf), opts...) +} + +func ToYAMLFile(filePath string, opts ...WriterOption) Writer { + return NewYAMLWriter(content.ToFile(filePath), opts...) +} + +func ToFile(filePath string, opts ...RecognizingWriterOption) Writer { + return NewRecognizingWriter(content.ToFile(filePath), opts...) +} diff --git a/pkg/frame/errors.go b/pkg/frame/errors.go new file mode 100644 index 00000000..e4539ce1 --- /dev/null +++ b/pkg/frame/errors.go @@ -0,0 +1,38 @@ +package frame + +import ( + "fmt" + + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "github.com/weaveworks/libgitops/pkg/util/structerr" +) + +// Enforce all struct errors implementing structerr.StructError +var _ structerr.StructError = &FrameCountOverflowError{} + +// FrameCountOverflowError is returned when a Reader or Writer would process more +// frames than allowed. 
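// An illustrative check (editorial sketch, not part of this patch), mirroring the
// errors.Is convention used by limitedio.ReadSizeOverflowError:
//
//	if errors.Is(err, &FrameCountOverflowError{}) {
//		// the Reader or Writer hit its configured MaxFrameCount
//	}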
+type FrameCountOverflowError struct { + // +optional + MaxFrameCount limitedio.Limit +} + +func (e *FrameCountOverflowError) Error() string { + msg := "no more frames can be processed, hit maximum amount" + if e.MaxFrameCount < 0 { + msg = fmt.Sprintf("%s: infinity", msg) // this is most likely a programming error + } else if e.MaxFrameCount > 0 { + msg = fmt.Sprintf("%s: %d", msg, e.MaxFrameCount) + } + return msg +} + +func (e *FrameCountOverflowError) Is(target error) bool { + _, ok := target.(*FrameCountOverflowError) + return ok +} + +// ErrFrameCountOverflow creates a *FrameCountOverflowError +func ErrFrameCountOverflow(maxFrames limitedio.Limit) *FrameCountOverflowError { + return &FrameCountOverflowError{MaxFrameCount: maxFrames} +} diff --git a/pkg/frame/interfaces.go b/pkg/frame/interfaces.go new file mode 100644 index 00000000..363e166a --- /dev/null +++ b/pkg/frame/interfaces.go @@ -0,0 +1,165 @@ +package frame + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/content" +) + +// TODO: Maybe implement/use context-aware (cancellable) io.Readers and io.Writers underneath? + +// Closer is like io.Closer, but with a Context passed along as well. +type Closer interface { + // Close closes the underlying resource. If Close is called multiple times, the + // underlying io.Closer decides the behavior and return value. If Close is called + // during a Read/Write operation, the underlying io.ReadCloser/io.WriteCloser + // decides the behavior. + Close(ctx context.Context) error +} + +// Reader is a framing type specific reader of an underlying io.Reader or io.ReadCloser. +// If an io.Reader is used, Close(ctx) is a no-op. If an io.ReadCloser is used, Close(ctx) +// will close the underlying io.ReadCloser. +// +// The Reader returns frames, as defined by the relevant framing type. +// For example, for YAML a frame represents a YAML document, while JSON is a self-framing +// format, i.e. encoded objects can be written to a stream just as +// '{ "a": "" ... }{ "b": "" ... }' and separated from there. +// +// Another way of defining a "frame" is that it MUST contain exactly one decodable object. +// This means that no empty (i.e. len(frame) == 0) frames shall be returned. Note: The decodable +// object might represent a list object (e.g. as Kubernetes' v1.List); more generally something +// decodable into a Go struct. +// +// The Reader can use as many underlying Read(p []byte) (n int, err error) calls it needs +// to the underlying io.Read(Clos)er. As long as frames can successfully be read from the underlying +// io.Read(Clos)er, len(frame) != 0 and err == nil. When io.EOF is encountered, len(frame) == 0 and +// errors.Is(err, io.EOF) == true. +// +// The Reader MUST be thread-safe, i.e. it must use the underlying io.Reader responsibly +// without causing race conditions when reading, e.g. by guarding reads with a mutual +// exclusion lock (mutex). The mutex isn't locked for closes, however. This enables e.g. closing the +// reader during a read operation, and other custom closing behaviors. +// +// The Reader MUST directly abort the read operation if the frame size exceeds +// ReadWriterOptions.MaxFrameSize, and return ErrFrameSizeOverflow. +// +// The Reader MUST return ErrFrameCountOverflow if the underlying Reader has returned more than +// ReadWriterOptions.MaxFrameCount successful read operations. The "total" frame limit is +// 10 * ReadWriterOptions.MaxFrameCount, which includes failed, empty and successful frames. 
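// As an editorial illustration (not part of this patch), a caller honoring these
// semantics typically loops until io.EOF:
//
//	for {
//		frame, err := r.ReadFrame(ctx)
//		if errors.Is(err, io.EOF) {
//			break // stream exhausted
//		}
//		if err != nil {
//			return err // e.g. a frame size or frame count overflow
//		}
//		// process frame; it is never empty
//	}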
+// Returned errors (including io.EOF) MUST be checked for equality using +// errors.Is(err, target), NOT using err == target. +// +// TODO: Say that the ContentType is assumed constant per content.Reader +// +// The Reader MAY respect cancellation signals on the context, depending on ReaderOptions. +// The Reader MAY support reporting trace spans for how long certain operations take. +type Reader interface { + // The Reader is specific to possibly multiple framing types + content.ContentTyped + + // ReadFrame reads one frame from the underlying io.Read(Clos)er. At maximum, the frame is as + // large as ReadWriterOptions.MaxFrameSize. See the documentation on the Reader interface for more + // details. + ReadFrame(ctx context.Context) ([]byte, error) + + // Exposes Metadata about the underlying io.Reader + content.MetadataContainer + + // The Reader can be closed. If an underlying io.Reader is used, this is a no-op. If an + // io.ReadCloser is used, this will close that io.ReadCloser. + Closer +} + +// ReaderFactory is capable of creating Readers. +type ReaderFactory interface { + // ct is dominant; will error if r has a conflicting content type + // ct must be one of the supported content types + NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader + // opts.MaxFrameCount is dominant, will always be set to 1 + // ct can be anything + // ct is dominant; will error if r has a conflicting content type + // Single options should not have MaxFrameCount at all, if possible + NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader + // will use the content type from r if set, otherwise infer from content metadata + // or peek bytes using the content.ContentTypeRecognizer + // should add to options for a recognizer + NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader + + // The SupportedContentTypes() method specifies what content types are supported by the + // ReaderFactory + content.ContentTypeSupporter +} + +// Writer is a framing type specific writer to an underlying io.Writer or io.WriteCloser. +// If an io.Writer is used, Close(ctx) is a no-op. If an io.WriteCloser is used, Close(ctx) +// will close the underlying io.WriteCloser. +// +// The Writer writes frames to the underlying stream, as defined by the framing type. +// For example, for YAML a frame represents a YAML document, while JSON is a self-framing +// format, i.e. encoded objects can be written to a stream just as +// '{ "a": "" ... }{ "b": "" ... }'. +// +// Another way of defining a "frame" is that it MUST contain exactly one decodable object. +// It is valid (but not recommended) to supply empty frames to the Writer. +// +// Writer will only call the underlying io.Write(Close)r's Write(p []byte) call once. +// If n < len(frame) and err == nil, io.ErrShortWrite will be returned. This means that +// it's the underlying io.Writer's responsibility to buffer the frame data, if needed. +// +// The Writer MUST be thread-safe, i.e. it must use the underlying io.Writer responsibly +// without causing race conditions when reading, e.g. by guarding writes/closes with a +// mutual exclusion lock (mutex). The mutex isn't locked for closes, however. +// This enables e.g. closing the writer during a write operation, and other custom closing behaviors. +// +// The Writer MUST directly abort the write operation if the frame size exceeds ReadWriterOptions.MaxFrameSize, +// and return ErrFrameSizeOverflow. 
The Writer MUST ignore empty frames, where len(frame) == 0, possibly +// after sanitation. The Writer MUST return ErrFrameCountOverflow if WriteFrame has been called more than +// ReadWriterOptions.MaxFrameCount times. +// +// Returned errors MUST be checked for equality using errors.Is(err, target), NOT using err == target. +// +// The Writer MAY respect cancellation signals on the context, depending on WriterOptions. +// The Writer MAY support reporting trace spans for how long certain operations take. +// +// TODO: Say that the ContentType is assumed constant per content.Writer +type Writer interface { + // The Writer is specific to this framing type. + content.ContentTyped + // WriteFrame writes one frame to the underlying io.Write(Close)r. + // See the documentation on the Writer interface for more details. + WriteFrame(ctx context.Context, frame []byte) error + + // Exposes metadata from the underlying content.Writer + content.MetadataContainer + + // The Writer can be closed. If an underlying io.Writer is used, this is a no-op. If an + // io.WriteCloser is used, this will close that io.WriteCloser. + Closer +} + +// WriterFactory is capable of creating Writers. +type WriterFactory interface { + // ct is dominant; will error if r has a conflicting content type + // ct must be one of the supported content types + NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer + // opts.MaxFrameCount is dominant, will always be set to 1 + // ct can be anything + // ct is dominant; will error if r has a conflicting content type + // Single options should not have MaxFrameCount at all, if possible + NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer + // will use the content type from r if set, otherwise infer from content metadata + // using the content.ContentTypeRecognizer + // should add to options for a recognizer + NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer + + // The SupportedContentTypes() method specifies what content types are supported by the + // WriterFactory + content.ContentTypeSupporter +} + +// Factory is the union of ReaderFactory and WriterFactory. +type Factory interface { + ReaderFactory + WriterFactory +} diff --git a/pkg/frame/k8s_reader_streaming.go b/pkg/frame/k8s_reader_streaming.go new file mode 100644 index 00000000..9ff21cec --- /dev/null +++ b/pkg/frame/k8s_reader_streaming.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file provides a means to read one whole frame from an io.ReadCloser +// returned by a k8s.io/apimachinery/pkg/runtime.Framer.NewFrameReader() +// +// This code is (temporarily) forked and derived from +// https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go +// and will be upstreamed if maintainers allow. 
The reason for forking this +// small piece of code is two-fold: a) This functionality is bundled within +// a runtime.Decoder, not provided as "just" some type of Reader, b) The +// upstream doesn't allow to configure the maximum frame size. + +package frame + +import ( + "fmt" + "io" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" +) + +// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L63-L67 +func newK8sStreamingReader(rc io.ReadCloser, maxFrameSize int64) content.ClosableRawSegmentReader { + if maxFrameSize == 0 { + maxFrameSize = limitedio.DefaultMaxReadSize.Int64() + } + + return &k8sStreamingReaderImpl{ + reader: rc, + buf: make([]byte, 1024), + // CHANGE: maxBytes is configurable + maxBytes: maxFrameSize, + } +} + +// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L51-L57 +type k8sStreamingReaderImpl struct { + reader io.ReadCloser + buf []byte + // CHANGE: In the original code, maxBytes was an int. int64 is more specific and flexible, however. + // TODO: Re-review this code; shall we have int or int64 here? + maxBytes int64 + resetRead bool +} + +// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L75-L106 +func (d *k8sStreamingReaderImpl) Read() ([]byte, error) { + base := 0 + for { + n, err := d.reader.Read(d.buf[base:]) + if err == io.ErrShortBuffer { + if n == 0 { + return nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf)) + } + if d.resetRead { + continue + } + // double the buffer size up to maxBytes + // NOTE: This might need changing upstream eventually, it only works when + // d.maxBytes/len(d.buf) is a multiple of 2 + // CHANGE: In the original code no cast from int -> int64 was needed + bufLen := int64(len(d.buf)) + if bufLen < d.maxBytes { + base += n + // CHANGE: Instead of unconditionally doubling the buffer, double the buffer + // length only to the extent it fits within d.maxBytes. Previously, it was a + // requirement that d.maxBytes was a multiple of 1024 for this logic to work. + newBytes := len(d.buf) + if d.maxBytes < 2*bufLen { + newBytes = int(d.maxBytes - bufLen) + } + d.buf = append(d.buf, make([]byte, newBytes)...) + continue + } + // must read the rest of the frame (until we stop getting ErrShortBuffer) + d.resetRead = true + // base = 0 // CHANGE: Not needed (as pointed out by golangci-lint:ineffassign) + return nil, streaming.ErrObjectTooLarge + } + if err != nil { + return nil, err + } + if d.resetRead { + // now that we have drained the large read, continue + d.resetRead = false + continue + } + base += n + break + } + return d.buf[:base], nil +} + +func (d *k8sStreamingReaderImpl) Close() error { return d.reader.Close() } diff --git a/pkg/frame/k8s_reader_yaml.go b/pkg/frame/k8s_reader_yaml.go new file mode 100644 index 00000000..eac7c50c --- /dev/null +++ b/pkg/frame/k8s_reader_yaml.go @@ -0,0 +1,130 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file provides a means to extract one YAML frame from an io.ReadCloser +// +// This code is (temporarily) forked and derived from +// https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/util/yaml/decoder.go#L111 +// and will be upstreamed if maintainers allow. The reason for forking this +// small piece of code is two-fold: a) The upstream doesn't allow configuring +// the maximum frame size, but hard-codes it to 5MB and b) for the first +// frame, the "---\n" prefix is returned and would otherwise be unnecessarily +// counted as frame content, when it actually is a frame separator. + +package frame + +import ( + "bufio" + "bytes" + "io" +) + +// k8sYAMLReader reads chunks of objects and returns ErrShortBuffer if +// the data is not sufficient. +type k8sYAMLReader struct { + r io.ReadCloser + scanner *bufio.Scanner + remaining []byte +} + +// newK8sYAMLReader decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk. io.ErrShortBuffer will be +// returned if the entire buffer could not be read to assist +// the caller in framing the chunk. +func newK8sYAMLReader(r io.ReadCloser, maxFrameSize int) io.ReadCloser { + scanner := bufio.NewScanner(r) + // the size of initial allocation for buffer 4k + buf := make([]byte, 4*1024) + // the maximum size used to buffer a token 5M + scanner.Buffer(buf, maxFrameSize) + scanner.Split(splitYAMLDocument) + return &k8sYAMLReader{ + r: r, + scanner: scanner, + } +} + +// Read reads the previous slice into the buffer, or attempts to read +// the next chunk. +// TODO: switch to readline approach. +func (d *k8sYAMLReader) Read(data []byte) (n int, err error) { + left := len(d.remaining) + if left == 0 { + // return the next chunk from the stream + if !d.scanner.Scan() { + err := d.scanner.Err() + if err == nil { + err = io.EOF + } + return 0, err + } + out := d.scanner.Bytes() + // TODO: This could be removed by the sanitation step; we don't have to + // do it here at this point. + out = bytes.TrimPrefix(out, []byte("---\n")) + d.remaining = out + left = len(out) + } + + // fits within data + if left <= len(data) { + copy(data, d.remaining) + d.remaining = nil + return left, nil + } + + // caller will need to reread + copy(data, d.remaining[:len(data)]) + d.remaining = d.remaining[len(data):] + return len(data), io.ErrShortBuffer +} + +func (d *k8sYAMLReader) Close() error { + return d.r.Close() +} + +const yamlSeparator = "\n---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. 
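// For illustration (editorial note, not part of this patch): fed the stream
// "a: 1\n---\nb: 2\n", the split function below yields the tokens "a: 1" and
// "b: 2\n"; the "\n---" separator is consumed and not returned as part of a token.
//
//	s := bufio.NewScanner(strings.NewReader("a: 1\n---\nb: 2\n"))
//	s.Split(splitYAMLDocument)
//	for s.Scan() {
//		fmt.Printf("%q\n", s.Text()) // "a: 1", then "b: 2\n"
//	}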
+func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} diff --git a/pkg/frame/options.go b/pkg/frame/options.go new file mode 100644 index 00000000..a3976578 --- /dev/null +++ b/pkg/frame/options.go @@ -0,0 +1,144 @@ +package frame + +import ( + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/util/limitedio" +) + +// TODO: Unit-test the options + +// DefaultMaxFrameCount specifies the default maximum of frames that can be read by a Reader. +const DefaultReadMaxFrameCount = 1024 + +type singleReaderOptions struct{ SingleOptions } +type singleWriterOptions struct{ SingleOptions } +type readerOptions struct{ Options } +type writerOptions struct{ Options } +type recognizingReaderOptions struct{ RecognizingOptions } +type recognizingWriterOptions struct{ RecognizingOptions } + +func defaultSingleReaderOptions() *singleReaderOptions { + return &singleReaderOptions{ + SingleOptions: SingleOptions{ + MaxFrameSize: limitedio.DefaultMaxReadSize, + Sanitizer: sanitize.NewJSONYAML(), + }, + } +} + +func defaultSingleWriterOptions() *singleWriterOptions { + return &singleWriterOptions{ + SingleOptions: SingleOptions{ + MaxFrameSize: limitedio.Infinite, + Sanitizer: sanitize.NewJSONYAML(), + }, + } +} + +func defaultReaderOptions() *readerOptions { + return &readerOptions{ + Options: Options{ + SingleOptions: defaultSingleReaderOptions().SingleOptions, + MaxFrameCount: DefaultReadMaxFrameCount, + }, + } +} + +func defaultWriterOptions() *writerOptions { + return &writerOptions{ + Options: Options{ + SingleOptions: defaultSingleWriterOptions().SingleOptions, + MaxFrameCount: limitedio.Infinite, + }, + } +} + +func defaultRecognizingReaderOptions() *recognizingReaderOptions { + return &recognizingReaderOptions{ + RecognizingOptions: RecognizingOptions{ + Options: defaultReaderOptions().Options, + Recognizer: content.NewJSONYAMLContentTypeRecognizer(), + }, + } +} + +func defaultRecognizingWriterOptions() *recognizingWriterOptions { + return &recognizingWriterOptions{ + RecognizingOptions: RecognizingOptions{ + Options: defaultWriterOptions().Options, + Recognizer: content.NewJSONYAMLContentTypeRecognizer(), + }, + } +} + +type SingleOptions struct { + // MaxFrameSize specifies the maximum allowed frame size that can be read and returned. + // Must be a positive integer. Defaults to DefaultMaxFrameSize. TODO + MaxFrameSize limitedio.Limit + // Sanitizer configures the sanitizer that should be used for sanitizing the frames. 
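	// Defaults to sanitize.NewJSONYAML(), as set up by the default option
	// constructors above.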
+ Sanitizer sanitize.Sanitizer +} + +func (o SingleOptions) applyToSingle(target *SingleOptions) { + if o.MaxFrameSize != 0 { + target.MaxFrameSize = o.MaxFrameSize + } + if o.Sanitizer != nil { + target.Sanitizer = o.Sanitizer + } +} + +type Options struct { + SingleOptions + + // MaxFrameCount specifies the maximum amount of successful frames that can be read or written + // using a Reader or Writer. This means that e.g. empty frames after sanitation are NOT + // counted as a frame in this context. When reading, there can be a maximum of 10*MaxFrameCount + // in total (including failed and empty). Must be a positive integer. Defaults: TODO DefaultMaxFrameCount. + MaxFrameCount limitedio.Limit +} + +func (o Options) applyTo(target *Options) { + if o.MaxFrameCount != 0 { + target.MaxFrameCount = o.MaxFrameCount + } + o.applyToSingle(&target.SingleOptions) +} + +type RecognizingOptions struct { + Options + + Recognizer content.ContentTypeRecognizer +} + +func (o RecognizingOptions) applyToRecognizing(target *RecognizingOptions) { + if o.Recognizer != nil { + target.Recognizer = o.Recognizer + } + o.applyTo(&target.Options) +} + +type SingleReaderOption interface { + ApplyToSingleReader(target *singleReaderOptions) +} + +type SingleWriterOption interface { + ApplyToSingleWriter(target *singleWriterOptions) +} + +type ReaderOption interface { + ApplyToReader(target *readerOptions) +} + +type WriterOption interface { + ApplyToWriter(target *writerOptions) +} + +type RecognizingReaderOption interface { + ApplyToRecognizingReader(target *recognizingReaderOptions) +} + +type RecognizingWriterOption interface { + ApplyToRecognizingWriter(target *recognizingWriterOptions) +} diff --git a/pkg/frame/options_boilerplate.go b/pkg/frame/options_boilerplate.go new file mode 100644 index 00000000..097421f4 --- /dev/null +++ b/pkg/frame/options_boilerplate.go @@ -0,0 +1,114 @@ +package frame + +var ( + _ SingleReaderOption = SingleOptions{} + _ SingleWriterOption = SingleOptions{} + _ ReaderOption = SingleOptions{} + _ WriterOption = SingleOptions{} + _ RecognizingReaderOption = SingleOptions{} + _ RecognizingWriterOption = SingleOptions{} + + _ SingleReaderOption = Options{} + _ SingleWriterOption = Options{} + _ ReaderOption = Options{} + _ WriterOption = Options{} + _ RecognizingReaderOption = Options{} + _ RecognizingWriterOption = Options{} + + _ SingleReaderOption = RecognizingOptions{} + _ SingleWriterOption = RecognizingOptions{} + _ ReaderOption = RecognizingOptions{} + _ WriterOption = RecognizingOptions{} + _ RecognizingReaderOption = RecognizingOptions{} + _ RecognizingWriterOption = RecognizingOptions{} +) + +func (o SingleOptions) ApplyToSingleReader(target *singleReaderOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToSingleWriter(target *singleWriterOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToReader(target *readerOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToWriter(target *writerOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToRecognizingReader(target *recognizingReaderOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToRecognizingWriter(target *recognizingWriterOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o Options) ApplyToReader(target *readerOptions) { + o.applyTo(&target.Options) +} + +func (o Options) ApplyToWriter(target *writerOptions) { + o.applyTo(&target.Options) +} 
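// For illustration, a sketch of how these pieces combine (the factory and the
// content.FromString helper appear elsewhere in this patch series): because the
// option structs above implement several of the *Option interfaces at once, one
// literal can be freely combined with others when calling the package's
// constructors, and applyOptions merges them over the defaults in the given order.
//
//	r := DefaultFactory().NewReader(content.ContentTypeYAML,
//		content.FromString("foo: bar\n"),
//		SingleOptions{MaxFrameSize: 1024}, // cap each frame at 1 KiB
//		Options{MaxFrameCount: 16},        // read at most 16 frames
//	)
//	_ = r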
+ +func (o Options) ApplyToRecognizingReader(target *recognizingReaderOptions) { + o.applyTo(&target.Options) +} + +func (o Options) ApplyToRecognizingWriter(target *recognizingWriterOptions) { + o.applyTo(&target.Options) +} + +func (o RecognizingOptions) ApplyToRecognizingReader(target *recognizingReaderOptions) { + o.applyToRecognizing(&target.RecognizingOptions) +} + +func (o RecognizingOptions) ApplyToRecognizingWriter(target *recognizingWriterOptions) { + o.applyToRecognizing(&target.RecognizingOptions) +} + +func (o *singleReaderOptions) applyOptions(opts []SingleReaderOption) *singleReaderOptions { + for _, opt := range opts { + opt.ApplyToSingleReader(o) + } + return o +} + +func (o *singleWriterOptions) applyOptions(opts []SingleWriterOption) *singleWriterOptions { + for _, opt := range opts { + opt.ApplyToSingleWriter(o) + } + return o +} + +func (o *readerOptions) applyOptions(opts []ReaderOption) *readerOptions { + for _, opt := range opts { + opt.ApplyToReader(o) + } + return o +} + +func (o *writerOptions) applyOptions(opts []WriterOption) *writerOptions { + for _, opt := range opts { + opt.ApplyToWriter(o) + } + return o +} + +func (o *recognizingReaderOptions) applyOptions(opts []RecognizingReaderOption) *recognizingReaderOptions { + for _, opt := range opts { + opt.ApplyToRecognizingReader(o) + } + return o +} + +func (o *recognizingWriterOptions) applyOptions(opts []RecognizingWriterOption) *recognizingWriterOptions { + for _, opt := range opts { + opt.ApplyToRecognizingWriter(o) + } + return o +} diff --git a/pkg/frame/reader.go b/pkg/frame/reader.go new file mode 100644 index 00000000..b2800d32 --- /dev/null +++ b/pkg/frame/reader.go @@ -0,0 +1,113 @@ +package frame + +import ( + "context" + "sync" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" +) + +// newHighlevelReader takes a "low-level" Reader (like *streamingReader or *yamlReader), +// and implements higher-level logic like proper closing, mutex locking and tracing. +func newHighlevelReader(r Reader, o *readerOptions) Reader { + return &highlevelReader{ + read: r, + readMu: &sync.Mutex{}, + opts: o, + maxTotalFrames: limitedio.Limit(o.MaxFrameCount * 10), + } +} + +// highlevelReader uses the closableResource for the mutex locking, properly handling +// the close logic, and initiating the trace spans. On top of that it records extra +// tracing context in ReadFrame. +type highlevelReader struct { + read Reader + // readMu guards read.ReadFrame + readMu *sync.Mutex + + opts *readerOptions + // maxTotalFrames is set to opts.MaxFrameCount * 10 + maxTotalFrames limitedio.Limit + // successfulFrameCount counts the amount of successful frames read + successfulFrameCount int64 + // totalFrameCount counts the total amount of frames read (including empty and failed ones) + totalFrameCount int64 +} + +func (r *highlevelReader) ReadFrame(ctx context.Context) ([]byte, error) { + // Make sure we have access to the underlying resource + r.readMu.Lock() + defer r.readMu.Unlock() + + var frame []byte + err := tracing.FromContext(ctx, r). 
+ TraceFunc(ctx, "ReadFrame", func(ctx context.Context, span trace.Span) error { + + // Refuse to read more than the maximum amount of successful frames + if r.opts.MaxFrameCount.IsLessThan(r.successfulFrameCount) { + return ErrFrameCountOverflow(r.opts.MaxFrameCount) + } + + // Call the underlying reader + var err error + frame, err = r.readFrame(ctx) + if err != nil { + return err + } + + // Record how large the frame is, and its content for debugging + span.SetAttributes(content.SpanAttrByteContent(frame)...) + return nil + }).RegisterCustom(content.SpanRegisterReadError) + // SpanRegisterReadError registers io.EOF as an "event", and other errors as "unknown errors" in the trace + if err != nil { + return nil, err + } + return frame, nil +} + +func (r *highlevelReader) readFrame(ctx context.Context) ([]byte, error) { + // Ensure the total number of frames doesn't overflow + // TODO: Should this be LT or LTE? + if r.maxTotalFrames.IsLessThanOrEqual(r.totalFrameCount) { + return nil, ErrFrameCountOverflow(r.maxTotalFrames) + } + // Read the frame, and increase the total frame counter is increased + // This does not at the moment forward the same ReadFrameResult instance, + // but that can maybe be done in the future if needed. It would be needed + // if the underlying Reader would return an interface that extends more + // methods than the default ones. + frame, err := r.read.ReadFrame(ctx) + r.totalFrameCount += 1 + if err != nil { + return nil, err + } + + // Sanitize the frame. + frame, err = sanitize.IfSupported(ctx, r.opts.Sanitizer, r.ContentType(), frame) + if err != nil { + return nil, err + } + + // If it's empty, read the next frame automatically + if len(frame) == 0 { + return r.readFrame(ctx) + } + + // Otherwise, if it's non-empty, return it and increase the "successful" counter + r.successfulFrameCount += 1 + // If the frame count now overflows, return a ErrFrameCountOverflow + if r.opts.MaxFrameCount.IsLessThan(r.successfulFrameCount) { + return nil, ErrFrameCountOverflow(r.opts.MaxFrameCount) + } + return frame, nil +} + +func (r *highlevelReader) ContentType() content.ContentType { return r.read.ContentType() } +func (r *highlevelReader) Close(ctx context.Context) error { return closeWithTrace(ctx, r.read, r) } +func (r *highlevelReader) ContentMetadata() content.Metadata { return r.read.ContentMetadata() } diff --git a/pkg/frame/reader_factory.go b/pkg/frame/reader_factory.go new file mode 100644 index 00000000..adbea975 --- /dev/null +++ b/pkg/frame/reader_factory.go @@ -0,0 +1,76 @@ +package frame + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/util/limitedio" +) + +// DefaultFactory returns the default implementation of both the ReaderFactory and +// WriterFactory. All constructors in this package use this Factory. 
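// For illustration (a sketch using helpers that appear elsewhere in this patch
// series), the recognizing constructor sniffs the content type before framing
// begins:
//
//	r := DefaultFactory().NewRecognizingReader(ctx, content.FromString(`{"foo":true}`))
//	frame, err := r.ReadFrame(ctx)
//
// With the default content.NewJSONYAMLContentTypeRecognizer, this input should be
// detected as JSON and framed accordingly.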
+func DefaultFactory() Factory { return defaultFactory{} } + +var internalFactoryVar = DefaultFactory() + +type defaultFactory struct{} + +func (defaultFactory) NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader { + o := defaultReaderOptions().applyOptions(opts) + + var lowlevel Reader + switch ct { + case content.ContentTypeYAML: + lowlevel = newYAMLReader(r, o) + case content.ContentTypeJSON: + lowlevel = newJSONReader(r, o) + default: + return newErrReader(content.ErrUnsupportedContentType(ct), "", r.ContentMetadata()) + } + return newHighlevelReader(lowlevel, o) +} + +func (defaultFactory) NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader { + o := defaultSingleReaderOptions().applyOptions(opts) + + return newHighlevelReader(newSingleReader(r, ct, o), &readerOptions{ + // Note: The MaxFrameCount == Infinite here makes the singleReader responsible for + // counting how many times + Options: Options{SingleOptions: o.SingleOptions, MaxFrameCount: limitedio.Infinite}, + }) +} + +func (f defaultFactory) NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader { + o := defaultRecognizingReaderOptions().applyOptions(opts) + + // Recognize the content type using the given recognizer + r, ct, err := content.NewRecognizingReader(ctx, r, o.Recognizer) + if err != nil { + return newErrReader(err, "", r.ContentMetadata()) + } + // Re-use the logic of the "main" Reader constructor; validate ct there + return f.NewReader(ct, r, o) +} + +func (defaultFactory) SupportedContentTypes() content.ContentTypes { + return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON} +} + +func newErrReader(err error, ct content.ContentType, meta content.Metadata) Reader { + return &errReader{ + ct, + meta.ToContainer(), + &nopCloser{}, + err, + } +} + +// errReader always returns an error +type errReader struct { + content.ContentTyped + content.MetadataContainer + Closer + err error +} + +func (r *errReader) ReadFrame(context.Context) ([]byte, error) { return nil, r.err } diff --git a/pkg/frame/reader_single.go b/pkg/frame/reader_single.go new file mode 100644 index 00000000..27470721 --- /dev/null +++ b/pkg/frame/reader_single.go @@ -0,0 +1,48 @@ +package frame + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/content" +) + +func newSingleReader(r content.Reader, ct content.ContentType, o *singleReaderOptions) Reader { + // Make sure not more than this set of bytes can be read + r, _ = content.WrapLimited(r, o.MaxFrameSize) + return &singleReader{ + // TODO: Apply options? + MetadataContainer: r.ContentMetadata().Clone().ToContainer(), + ContentTyped: ct, + r: r, + } +} + +// singleReader implements reading a single frame (up to a certain limit) from an io.ReadCloser. +// It MUST be wrapped in a higher-level composite Reader like the highlevelReader to satisfy the +// Reader interface correctly. +type singleReader struct { + content.MetadataContainer + content.ContentTyped + r content.Reader + hasBeenRead bool +} + +// Read the whole frame from the underlying io.Reader, up to a given limit +func (r *singleReader) ReadFrame(ctx context.Context) ([]byte, error) { + if r.hasBeenRead { + // This really should never happen, because the higher-level Reader should ensure + // no more than one frame can be read from the downstream as opts.MaxFrameCount == 1. + return nil, io.EOF // TODO: What about the third time? 
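		// (Subsequent calls, i.e. the third, fourth and so on, keep hitting this
		// branch and return io.EOF again, since hasBeenRead is never reset.)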
+ } + // Mark we are now the frame (regardless of the result) + r.hasBeenRead = true + // Read the whole frame from the underlying io.Reader, up to a given amount + frame, err := io.ReadAll(r.r.WithContext(ctx)) + if err != nil { + return nil, err + } + return frame, nil +} + +func (r *singleReader) Close(ctx context.Context) error { return r.r.WithContext(ctx).Close() } diff --git a/pkg/frame/reader_streaming.go b/pkg/frame/reader_streaming.go new file mode 100644 index 00000000..86efddbd --- /dev/null +++ b/pkg/frame/reader_streaming.go @@ -0,0 +1,115 @@ +package frame + +import ( + "context" + "errors" + "io" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" +) + +func newYAMLReader(r content.Reader, o *readerOptions) Reader { + // json.YAMLFramer.NewFrameReader takes care of the actual YAML framing logic + maxFrameSizeInt, err := o.MaxFrameSize.Int() + if err != nil { + return newErrReader(err, "", r.ContentMetadata()) + } + r = r.Wrap(func(underlying io.ReadCloser) io.Reader { + return newK8sYAMLReader(underlying, maxFrameSizeInt) + }) + + // Mark the content type as YAML + r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeYAML)) + + return newStreamingReader(content.ContentTypeYAML, r, o.MaxFrameSize) +} + +// newJSONReader creates a "low-level" JSON Reader from the given io.ReadCloser. +func newJSONReader(r content.Reader, o *readerOptions) Reader { + // json.Framer.NewFrameReader takes care of the actual JSON framing logic + r = r.Wrap(func(underlying io.ReadCloser) io.Reader { + return json.Framer.NewFrameReader(underlying) + }) + + // Mark the content type as JSON + r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeJSON)) + + return newStreamingReader(content.ContentTypeJSON, r, o.MaxFrameSize) +} + +// newStreamingReader makes a generic Reader that reads from an io.ReadCloser returned +// from Kubernetes' runtime.Framer.NewFrameReader, in exactly the way +// k8s.io/apimachinery/pkg/runtime/serializer/streaming implements this. +// On a high-level, it means that many small Read(p []byte) calls are made as long as +// io.ErrShortBuffer is returned. When err == nil is returned from rc, we know that we're +// at the end of a frame, and at that point the frame is returned. +// +// Note: This Reader is a so-called "low-level" one. It doesn't do tracing, mutex locking, or +// proper closing logic. It must be wrapped by a composite, high-level Reader like highlevelReader. +func newStreamingReader(ct content.ContentType, r content.Reader, maxFrameSize limitedio.Limit) Reader { + // Limit the amount of bytes read from the content.Reader + r, resetCounter := content.WrapLimited(r, maxFrameSize) + // Wrap + cr := r.WrapSegment(func(rc io.ReadCloser) content.RawSegmentReader { + return newK8sStreamingReader(rc, maxFrameSize.Int64()) + }) + + return &streamingReader{ + // Clone the metadata and expose it + // TODO: Maybe ReaderOptions should allow changing it? + MetadataContainer: r.ContentMetadata().Clone().ToContainer(), + ContentTyped: ct, + resetCounter: resetCounter, + cr: cr, + maxFrameSize: maxFrameSize, + } +} + +// streamingReader is a small "conversion" struct that implements the Reader interface for a +// given k8sStreamingReader. When reader_streaming_k8s.go is upstreamed, we can replace the +// temporary k8sStreamingReader interface with a "proper" Kubernetes one. 
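// Two details of ReadFrame below are worth noting: the byte counter of the wrapped
// limited reader is reset (via resetCounter) only after a frame has been read
// successfully, so the maxFrameSize budget applies per frame rather than to the
// stream as a whole; and streaming.ErrObjectTooLarge from the Kubernetes machinery
// is translated into a limitedio.ReadSizeOverflowError so that callers only need to
// handle one overflow error type.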
+type streamingReader struct { + content.MetadataContainer + content.ContentTyped + resetCounter content.ResetCounterFunc + cr content.SegmentReader + maxFrameSize limitedio.Limit +} + +func (r *streamingReader) ReadFrame(ctx context.Context) ([]byte, error) { + // Read one frame from the streamReader + frame, err := r.cr.WithContext(ctx).Read() + if err != nil { + // Transform streaming.ErrObjectTooLarge to a ErrFrameSizeOverflow, if returned. + return nil, mapError(err, errorMappings{ + streaming.ErrObjectTooLarge: func() error { + return limitedio.ErrReadSizeOverflow(r.maxFrameSize) + }, + }) + } + // Reset the counter only when we have a successful frame + r.resetCounter() + return frame, nil +} + +func (r *streamingReader) Close(ctx context.Context) error { return r.cr.WithContext(ctx).Close() } + +// mapError is an utility for mapping a "actual" error to a lazily-evaluated "desired" one. +// Equality between the errorMappings' keys and err is defined by errors.Is +func mapError(err error, f errorMappings) error { + for target, mkErr := range f { + if errors.Is(err, target) { + return mkErr() + } + } + return err +} + +// errorMappings maps actual errors to lazily-evaluated desired ones +type errorMappings map[error]mkErrorFunc + +// mkErrorFunc lazily creates an error +type mkErrorFunc func() error diff --git a/pkg/frame/reader_test.go b/pkg/frame/reader_test.go new file mode 100644 index 00000000..3fd09e22 --- /dev/null +++ b/pkg/frame/reader_test.go @@ -0,0 +1,528 @@ +package frame + +import ( + "bytes" + "context" + "fmt" + "io" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" +) + +/* +Enable logging and tracing of the unit tests in this package + +Imports: +"go.uber.org/zap/zapcore" +"sigs.k8s.io/controller-runtime/pkg/log" +"sigs.k8s.io/controller-runtime/pkg/log/zap" + +func init() { + + // Set up the global logger, in "console mode" for human-friendly output + log.SetLogger(zap.New(zap.ConsoleEncoder(func(ec *zapcore.EncoderConfig) { + ec.TimeKey = "" + }))) + + err := tracing.NewBuilder(). + //RegisterStdoutExporter(stdouttrace.WithWriter(io.Discard)). + RegisterInsecureJaegerExporter(""). + WithLogging(true). + InstallGlobally() + if err != nil { + fmt.Printf("failed to install tracing provider: %v\n", err) + os.Exit(1) + } +}*/ + +// TODO: Make sure that len(frame) == 0 when err != nil for the Writer. + +// TODO: Test the output traces more througoutly, when there is SpanProcessor that supports writing +// relevant data to a file, and do matching between spans. + +// TODO: Make some 3M (or more) JSON/YAML files and show that these are readable (or not). That's not +// testing a case that already isn't tested by the unit tests below, but would be a good marker that +// it actually solves the right problem. + +// TODO: Maybe add some race-condition tests? The centralized place mutexes are used are in +// highlevel{Reader,Writer}, so that'd be the place in that case. 
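// For orientation, the consumption pattern these test tables exercise looks roughly
// like the sketch below (assuming a Reader r, a ctx, and the errors/io imports):
//
//	for {
//		frame, err := r.ReadFrame(ctx)
//		if errors.Is(err, io.EOF) {
//			break // the stream is exhausted
//		}
//		if errors.Is(err, &limitedio.ReadSizeOverflowError{}) ||
//			errors.Is(err, &FrameCountOverflowError{}) {
//			break // a configured size or count limit was exceeded
//		}
//		if err != nil {
//			break // some other read error
//		}
//		_ = frame // a successful, sanitized, non-empty frame
//	}
//
// which is also why the expected-error slices below match sentinel errors via errors.Is.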
+ +type testcase struct { + singleReadOpts []SingleReaderOption + singleWriteOpts []SingleWriterOption + // single{Read,Write}Opts are automatically casted to {Reader,Writer}Options if possible + // and included in readOpts and writeOpts; no need to specify twice + readOpts []ReaderOption + writeOpts []WriterOption + // {read,write}Opts are automatically casted to Recognizing{Reader,Writer}Options if possible + // and included in recognizing{Read,Write}Opts; no need to specify twice + recognizingReadOpts []RecognizingReaderOption + recognizingWriteOpts []RecognizingWriterOption + + name string + testdata []testdata + // Reader.ReadFrame will be called len(readResults) times. If a err == nil return is expected, just put + // nil in the error slice. Similarly for Writer.WriteFrame and writeResults. + // Note that len(readResults) >= len(frames) and len(writeResults) >= len(frames) must hold. + // By issuing more reads or writes than there are frames, one can check the error behavior + readResults []error + writeResults []error + // if closeWriterIdx or closeReaderIdx are non-nil, the Reader/Writer will be closed after the read at + // that specified index. closeWriterErr and closeReaderErr can be used to check the error returned by + // the close call. + closeWriterIdx *int64 + closeWriterErr error + closeReaderIdx *int64 + closeReaderErr error +} + +type testdata struct { + ct content.ContentType + single, recognizing bool + // frames contain the individual frames of rawData, which in turn is the content of the underlying + // source/stream. if len(writeResults) == 0, there will be no checking that writing all frames + // in order will produce the correct rawData. if len(readResults) == 0, there will be no checking + // that reading rawData will produce the frames string + rawData string + frames []string +} + +const ( + yamlSep = "---\n" + noNewlineYAML = `foobar: true` + testYAML = noNewlineYAML + "\n" + testYAMLlen = int64(len(testYAML)) + messyYAMLP1 = ` +--- + +--- +` + noNewlineYAML + ` +` + messyYAMLP2 = ` + +--- +--- +` + noNewlineYAML + ` +---` + messyYAML = messyYAMLP1 + messyYAMLP2 + + testJSON = `{"foo":true} +` + testJSONlen = int64(len(testJSON)) + testJSON2 = `{"bar":"hello"} +` + messyJSONP1 = ` + +` + testJSON + ` +` + messyJSONP2 = ` + +` + testJSON + ` +` + messyJSON = messyJSONP1 + messyJSONP2 + + otherCT = content.ContentType("other") + otherFrame = "('other'; 9)\n('bar'; true)" + otherFrameLen = int64(len(otherFrame)) +) + +func TestReader(t *testing.T) { + // Some tests depend on this + require.Equal(t, testYAMLlen, testJSONlen) + NewFactoryTester(t, defaultFactory{}).Test() + assert.Nil(t, tracing.ForceFlushGlobal(context.Background(), 0)) +} + +// TODO: Test that closing of Readers and Writers works + +var defaultTestCases = []testcase{ + // Roundtrip cases + { + name: "simple roundtrip", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, + }, + writeResults: []error{nil, nil, nil, nil}, + readResults: []error{nil, io.EOF, io.EOF, io.EOF}, + }, + + { + name: "two-frame roundtrip with closed writer", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, + }, + writeResults: []error{nil, nil, nil, nil}, + readResults: []error{nil, nil, 
io.EOF, io.EOF}, + }, + // YAML newline addition + { + name: "YAML Read: a newline will be added", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: noNewlineYAML, frames: []string{testYAML}}, + }, + readResults: []error{nil, io.EOF}, + }, + { + name: "YAML Write: a newline will be added", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{noNewlineYAML}, rawData: yamlSep + testYAML}, + }, + writeResults: []error{nil}, + }, + // Empty frames + { + name: "Read: io.EOF when there are no non-empty frames", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: "---"}, + {ct: content.ContentTypeYAML, rawData: "---\n"}, + {ct: content.ContentTypeJSON, rawData: ""}, + {ct: content.ContentTypeJSON, rawData: " \n "}, + }, + readResults: []error{io.EOF}, + }, + { + name: "Write: Empty sanitized frames aren't written", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{"---", "---\n", " \n--- \n---"}}, + {ct: content.ContentTypeJSON, frames: []string{"", " \n ", " "}}, + }, + writeResults: []error{nil, nil, nil}, + }, + { + name: "Write: can write empty frames forever without errors", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, + }, + writeResults: []error{nil, nil, nil, nil, nil}, + readResults: []error{nil, nil, io.EOF}, + }, + // Sanitation + { + name: "YAML Read: a leading \\n--- will be ignored", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + noNewlineYAML, frames: []string{testYAML}}, + }, + readResults: []error{nil, io.EOF}, + }, + { + name: "YAML Read: a leading --- will be ignored", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: yamlSep + noNewlineYAML, frames: []string{testYAML}}, + }, + readResults: []error{nil, io.EOF}, + }, + { + name: "Read: sanitize messy content", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: messyYAML, frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, rawData: messyJSON, frames: []string{testJSON, testJSON}}, + }, + readResults: []error{nil, nil, io.EOF}, + }, + { + name: "Write: sanitize messy content", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{messyYAMLP1, messyYAMLP2}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{messyJSONP1, messyJSONP2}, rawData: testJSON + testJSON}, + }, + writeResults: []error{nil, nil}, + }, + // MaxFrameSize + { + name: "Read: the frame size is exactly within bounds, also enforce counter reset", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, rawData: testJSON + testJSON, frames: []string{testJSON, testJSON}}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{nil, nil, io.EOF}, + }, + { + name: "YAML Read: there is a newline before the initial ---, should sanitize", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{nil, nil, io.EOF}, + }, + { + name: "Read: the frame 
is out of bounds, on the same line", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: testYAML}, + {ct: content.ContentTypeJSON, rawData: testJSON}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen - 2)}}, + readResults: []error{&limitedio.ReadSizeOverflowError{}}, + }, + { + name: "YAML Read: the frame is out of bounds, but continues on the next line", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: testYAML + testYAML}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{&limitedio.ReadSizeOverflowError{}}, + }, + { + name: "Read: first frame ok, then always frame overflow", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: testYAML + yamlSep + testYAML + testYAML, frames: []string{testYAML}}, + {ct: content.ContentTypeJSON, rawData: testJSON + testJSON2, frames: []string{testJSON}}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{nil, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}}, + }, + { + name: "Write: the second frame is too large, ignore that, but allow writing smaller frames later", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML + testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2, testJSON}, rawData: testJSON + testJSON}, + }, + singleWriteOpts: []SingleWriterOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + writeResults: []error{nil, &limitedio.ReadSizeOverflowError{}, nil}, + }, + // TODO: test negative limits too + { + name: "first frame ok, then Read => EOF and Write => nil consistently", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, + }, + readResults: []error{nil, io.EOF, io.EOF, io.EOF, io.EOF}, + writeResults: []error{nil, nil, nil, nil, nil}, + }, + // MaxFrameCount + { + name: "Write: Don't allow writing more than a maximum amount of frames", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON, testJSON}, rawData: testJSON + testJSON}, + }, + writeResults: []error{nil, nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + writeOpts: []WriterOption{&Options{MaxFrameCount: 2}}, + }, + { + name: "Read: Don't allow reading more than a maximum amount of successful frames", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: testYAML + yamlSep + testYAML + yamlSep + testYAML, + frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, + rawData: testJSON + testJSON + testJSON, + frames: []string{testJSON, testJSON}}, + }, + readResults: []error{nil, nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + readOpts: []ReaderOption{&Options{MaxFrameCount: 2}}, + }, + { + name: "Read: Don't allow reading more than a maximum amount of successful frames, and 10x in total", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: strings.Repeat("\n"+yamlSep, 10) + testYAML}, + }, + readResults: []error{&FrameCountOverflowError{}, 
&FrameCountOverflowError{}}, + readOpts: []ReaderOption{&Options{MaxFrameCount: 1}}, + }, + { + name: "Read: Allow reading up to the maximum amount of 10x the successful frames count", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: strings.Repeat("\n"+yamlSep, 9) + testYAML + yamlSep + yamlSep, frames: []string{testYAML}}, + }, + readResults: []error{nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + readOpts: []ReaderOption{&Options{MaxFrameCount: 1}}, + }, + { + name: "Read: Allow reading exactly that amount of successful frames, if then io.EOF", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: testYAML + yamlSep + testYAML, + frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, + rawData: testJSON + testJSON, + frames: []string{testJSON, testJSON}}, + }, + readResults: []error{nil, nil, io.EOF, io.EOF}, + readOpts: []ReaderOption{&Options{MaxFrameCount: 2}}, + }, + // Other Framing Types and Single + { + name: "Roundtrip: Allow reading other framing types for single reader, check overflows too", + testdata: []testdata{ + {ct: otherCT, single: true, rawData: otherFrame, frames: []string{otherFrame}}, + }, + writeResults: []error{nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + readResults: []error{nil, io.EOF, io.EOF, io.EOF}, + }, + { + name: "Read: other framing type frame size is exactly within bounds", + testdata: []testdata{ + {ct: otherCT, single: true, rawData: otherFrame, frames: []string{otherFrame}}, + }, + singleReadOpts: []SingleReaderOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen)}}, + readResults: []error{nil, io.EOF}, + }, + { + name: "Read: other framing type frame size overflow", + testdata: []testdata{ + {ct: otherCT, single: true, rawData: otherFrame}, + }, + singleReadOpts: []SingleReaderOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen - 1)}}, + readResults: []error{&limitedio.ReadSizeOverflowError{}, io.EOF, io.EOF}, + }, + { + name: "Write: other framing type frame size overflow", + testdata: []testdata{ + {ct: otherCT, single: true, frames: []string{otherFrame, otherFrame}}, + }, + singleWriteOpts: []SingleWriterOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen - 1)}}, + writeResults: []error{&limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}, nil}, + }, +} + +func NewFactoryTester(t *testing.T, f Factory) *FactoryTester { + return &FactoryTester{ + t: t, + factory: f, + cases: defaultTestCases, + } +} + +type FactoryTester struct { + t *testing.T + factory Factory + + cases []testcase +} + +func (h *FactoryTester) Test() { + for _, c := range h.cases { + h.t.Run(c.name, func(t *testing.T) { + h.testRoundtripCase(t, &c) + }) + } +} + +func (h *FactoryTester) testRoundtripCase(t *testing.T, c *testcase) { + sropt := (&singleReaderOptions{}).applyOptions(c.singleReadOpts) + swopt := (&singleWriterOptions{}).applyOptions(c.singleWriteOpts) + ropt := (&readerOptions{}).applyOptions(c.readOpts) + wopt := (&writerOptions{}).applyOptions(c.writeOpts) + + c.readOpts = append(c.readOpts, sropt) + c.recognizingReadOpts = append(c.recognizingReadOpts, sropt) + c.recognizingReadOpts = append(c.recognizingReadOpts, ropt) + + c.writeOpts = append(c.writeOpts, swopt) + c.recognizingWriteOpts = append(c.recognizingWriteOpts, swopt) + c.recognizingWriteOpts = append(c.recognizingWriteOpts, wopt) + + ctx := tracing.Context(true) + for i, data := range c.testdata { + subName := fmt.Sprintf("%d %s", i, 
data.ct) + t.Run(subName, func(t *testing.T) { + trName := fmt.Sprintf("%s %s", c.name, subName) + _ = tracing.FromContext(ctx, trName).TraceFunc(ctx, "", + func(ctx context.Context, _ trace.Span) error { + h.testRoundtripCaseContentType(t, ctx, c, &data) + return nil + }).Register() + }) + } +} + +func (h *FactoryTester) testRoundtripCaseContentType(t *testing.T, ctx context.Context, c *testcase, d *testdata) { + var buf bytes.Buffer + + readCloseCounter := &recordingCloser{} + writeCloseCounter := &recordingCloser{} + cw := content.NewWriter(compositeio.WriteCloser(&buf, writeCloseCounter)) + cr := content.NewReader(compositeio.ReadCloser(&buf, readCloseCounter)) + var w Writer + if d.single && d.recognizing { + panic("cannot be both single and recognizing") + } else if d.single && !d.recognizing { + w = h.factory.NewSingleWriter(d.ct, cw, c.singleWriteOpts...) + } else if !d.single && d.recognizing { + w = h.factory.NewRecognizingWriter(cw, c.recognizingWriteOpts...) + } else { + w = h.factory.NewWriter(d.ct, cw, c.writeOpts...) + } + assert.Equalf(t, w.ContentType(), d.ct, "Writer.content.ContentType") + + var r Reader + if d.single && d.recognizing { + panic("cannot be both single and recognizing") + } else if d.single && !d.recognizing { + r = h.factory.NewSingleReader(d.ct, cr, c.singleReadOpts...) + } else if !d.single && d.recognizing { + r = h.factory.NewRecognizingReader(ctx, cr, c.recognizingReadOpts...) + } else { + r = h.factory.NewReader(d.ct, cr, c.readOpts...) + } + assert.Equalf(t, r.ContentType(), d.ct, "Reader.content.ContentType") + + // Write frames using the writer + for i, expected := range c.writeResults { + var frame []byte + // Only write a frame using the writer if one is supplied + if i < len(d.frames) { + frame = []byte(d.frames[i]) + } + + // Write the frame using the writer and check the error + got := w.WriteFrame(ctx, frame) + assert.ErrorIsf(t, got, expected, "Writer.WriteFrame err %d", i) + + // If we should close the writer here, do it and check the expected error + if c.closeWriterIdx != nil && *c.closeWriterIdx == int64(i) { + assert.ErrorIsf(t, w.Close(ctx), c.closeWriterErr, "Writer.Close err %d", i) + } + } + + assert.Equalf(t, 0, writeCloseCounter.count, "Writer should not be closed") + + // Check that the written output was as expected, if writing is enabled + if len(c.writeResults) != 0 { + assert.Equalf(t, d.rawData, buf.String(), "Writer Output") + } else { + // If writing was not tested, make sure the buffer contains the raw data for reading + buf = *bytes.NewBufferString(d.rawData) + } + + // Read frames using the reader + for i, expected := range c.readResults { + // Check the expected error + frame, err := r.ReadFrame(ctx) + assert.ErrorIsf(t, err, expected, "Reader.ReadFrame err %d", i) + // Only check the frame content if there's an expected frame + if i < len(d.frames) { + assert.Equalf(t, d.frames[i], string(frame), "Reader.ReadFrame frame %d", i) + } + + // If we should close the reader here, do it and check the expected error + if c.closeReaderIdx != nil && *c.closeReaderIdx == int64(i) { + assert.ErrorIsf(t, r.Close(ctx), c.closeReaderErr, "Reader.Close err %d", i) + } + } + assert.Equalf(t, 0, readCloseCounter.count, "Reader should not be closed") +} + +type recordingCloser struct { + count int +} + +func (c *recordingCloser) Close() error { + c.count += 1 + return nil +} diff --git a/pkg/frame/utils.go b/pkg/frame/utils.go new file mode 100644 index 00000000..ebf676c4 --- /dev/null +++ b/pkg/frame/utils.go @@ -0,0 +1,78 @@ 
+package frame + +import ( + "context" + "errors" + "io" + + "github.com/weaveworks/libgitops/pkg/tracing" + "go.opentelemetry.io/otel/trace" +) + +// List is a list of list (byte arrays), used for convenience functions +type List [][]byte + +// ListFromReader is a convenience method that constructs a List by reading +// from the given Reader r until io.EOF. If an other error than io.EOF is returned, +// reading is aborted immediately and the error is returned. +func ListFromReader(ctx context.Context, r Reader) (List, error) { + var f List + for { + // Read until we get io.EOF or an error + frame, err := r.ReadFrame(ctx) + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + // Append all list to the returned list + f = append(f, frame) + } + return f, nil +} + +func ListFromBytes(list ...[]byte) List { return list } + +// WriteTo is a convenience method that writes a set of list to a Writer. +// If an error occurs, writing stops and the error is returned. +func (f List) WriteTo(ctx context.Context, fw Writer) error { + // Loop all list in the list, and write them individually to the Writer + for _, frame := range f { + if err := fw.WriteFrame(ctx, frame); err != nil { + return err + } + } + return nil +} + +// ToIoWriteCloser transforms a Writer to an io.WriteCloser, by binding a relevant +// context.Context to it. If err != nil, then n == 0. If err == nil, then n == len(frame). +func ToIoWriteCloser(ctx context.Context, w Writer) io.WriteCloser { + return &ioWriterHelper{ctx, w} +} + +type ioWriterHelper struct { + ctx context.Context + parent Writer +} + +func (w *ioWriterHelper) Write(frame []byte) (n int, err error) { + if err := w.parent.WriteFrame(w.ctx, frame); err != nil { + return 0, err + } + return len(frame), nil +} +func (w *ioWriterHelper) Close() error { + return w.parent.Close(w.ctx) +} + +func closeWithTrace(ctx context.Context, c Closer, obj interface{}) error { + return tracing.FromContext(ctx, obj).TraceFunc(ctx, "Close", func(ctx context.Context, _ trace.Span) error { + return c.Close(ctx) + }).Register() +} + +// nopCloser returns nil when Close(ctx) is called +type nopCloser struct{} + +func (*nopCloser) Close(context.Context) error { return nil } diff --git a/pkg/frame/utils_test.go b/pkg/frame/utils_test.go new file mode 100644 index 00000000..c0c850f5 --- /dev/null +++ b/pkg/frame/utils_test.go @@ -0,0 +1,119 @@ +package frame + +import ( + "bytes" + "context" + "io" + "io/fs" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "github.com/weaveworks/libgitops/pkg/util/limitedio" +) + +type rawCloserExposer interface { + RawCloser() io.Closer +} + +func TestFromConstructors(t *testing.T) { + yamlPath := filepath.Join(t.TempDir(), "foo.yaml") + str := "foo: bar\n" + byteContent := []byte(str) + err := ioutil.WriteFile(yamlPath, byteContent, 0644) + require.Nil(t, err) + + ctx := tracing.Context(true) + // FromYAMLFile -- found + got, err := FromYAMLFile(yamlPath).ReadFrame(ctx) + assert.Nil(t, err) + assert.Equal(t, str, string(got)) + // content.FromFile -- already closed + f := content.FromFile(yamlPath) + (f.(rawCloserExposer)).RawCloser().Close() // deliberately close the file before giving it to the reader + got, err = NewYAMLReader(f).ReadFrame(ctx) + assert.ErrorIs(t, err, fs.ErrClosed) + 
assert.Empty(t, got) + // FromYAMLFile -- not found + got, err = FromYAMLFile(filepath.Join(t.TempDir(), "notexist.yaml")).ReadFrame(ctx) + assert.ErrorIs(t, err, fs.ErrNotExist) + assert.Empty(t, got) + // FromYAMLBytes + got, err = FromYAMLBytes(byteContent).ReadFrame(ctx) + assert.Nil(t, err) + assert.Equal(t, byteContent, got) + // FromYAMLString + got, err = FromYAMLString(str).ReadFrame(ctx) + assert.Nil(t, err) + assert.Equal(t, str, string(got)) + assert.Nil(t, tracing.ForceFlushGlobal(ctx, 0)) +} + +func TestToIoWriteCloser(t *testing.T) { + var buf bytes.Buffer + closeRec := &recordingCloser{} + cw := content.NewWriter(compositeio.WriteCloser(&buf, closeRec)) + w := NewYAMLWriter(cw, SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}) + ctx := tracing.Context(true) + iow := ToIoWriteCloser(ctx, w) + + byteContent := []byte(testYAML) + n, err := iow.Write(byteContent) + assert.Len(t, byteContent, n) + assert.Nil(t, err) + + // Check closing forwarding + assert.Nil(t, iow.Close()) + assert.Equal(t, 1, closeRec.count) + + // Try writing again + overflowContent := []byte(testYAML + testYAML) + n, err = iow.Write(overflowContent) + assert.Equal(t, 0, n) + assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{}) + // Assume the writer has been closed only once + assert.Equal(t, 1, closeRec.count) + assert.Equal(t, buf.String(), yamlSep+string(byteContent)) + + assert.Nil(t, tracing.ForceFlushGlobal(context.Background(), 0)) +} + +func TestListFromReader(t *testing.T) { + ctx := tracing.Context(true) + // Happy case + fr, err := ListFromReader(ctx, FromYAMLString(messyYAML)) + assert.Equal(t, List{[]byte(testYAML), []byte(testYAML)}, fr) + assert.Nil(t, err) + + // Non-happy case + r := NewJSONReader(content.FromString(testJSON2), SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen - 1)}) + fr, err = ListFromReader(ctx, r) + assert.Len(t, fr, 0) + assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{}) + assert.Nil(t, tracing.ForceFlushGlobal(ctx, 0)) +} + +func TestList_WriteTo(t *testing.T) { + var buf bytes.Buffer + // TODO: Automatically get the name of the writer passed in, to avoid having to name + // everything. i.e. 
content.NewWriterName(string, io.Writer) + cw := content.NewWriter(&buf) + w := NewYAMLWriter(cw) + ctx := context.Background() + // Happy case + err := ListFromBytes([]byte(testYAML), []byte(testYAML)).WriteTo(ctx, w) + assert.Equal(t, buf.String(), yamlSep+testYAML+yamlSep+testYAML) + assert.Nil(t, err) + + // Non-happy case + buf.Reset() + w = NewJSONWriter(cw, SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen)}) + err = ListFromBytes([]byte(testJSON), []byte(testJSON2)).WriteTo(ctx, w) + assert.Equal(t, buf.String(), testJSON) + assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{}) +} diff --git a/pkg/frame/writer.go b/pkg/frame/writer.go new file mode 100644 index 00000000..5a6f93fc --- /dev/null +++ b/pkg/frame/writer.go @@ -0,0 +1,76 @@ +package frame + +import ( + "context" + "sync" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" +) + +func newHighlevelWriter(w Writer, opts *writerOptions) Writer { + return &highlevelWriter{ + writer: w, + writerMu: &sync.Mutex{}, + opts: opts, + } +} + +type highlevelWriter struct { + writer Writer + writerMu *sync.Mutex + opts *writerOptions + // frameCount counts the amount of successful frames written + frameCount int64 +} + +func (w *highlevelWriter) WriteFrame(ctx context.Context, frame []byte) error { + w.writerMu.Lock() + defer w.writerMu.Unlock() + + return tracing.FromContext(ctx, w).TraceFunc(ctx, "WriteFrame", func(ctx context.Context, span trace.Span) error { + // Refuse to write too large frames + if w.opts.MaxFrameSize.IsLessThan(int64(len(frame))) { + return limitedio.ErrReadSizeOverflow(w.opts.MaxFrameSize) + } + // Refuse to write more than the maximum amount of frames + if w.opts.MaxFrameCount.IsLessThanOrEqual(w.frameCount) { + return ErrFrameCountOverflow(w.opts.MaxFrameCount) + } + + // Sanitize the frame + // TODO: Maybe create a composite writer that actually reads the given frame first, to + // fully sanitize/validate it, and first then write the frames out using the writer? + frame, err := sanitize.IfSupported(ctx, w.opts.Sanitizer, w.ContentType(), frame) + if err != nil { + return err + } + + // Register the amount of (sanitized) bytes and call the underlying Writer + span.SetAttributes(content.SpanAttrByteContent(frame)...) + + // Catch empty frames + if len(frame) == 0 { + return nil + } + + err = w.writer.WriteFrame(ctx, frame) + + // Increase the frame counter, if the write was successful + if err == nil { + w.frameCount += 1 + } + return err + }).Register() +} + +func (w *highlevelWriter) ContentType() content.ContentType { return w.writer.ContentType() } +func (w *highlevelWriter) Close(ctx context.Context) error { + return closeWithTrace(ctx, w.writer, w) +} + +// Just forward the metadata, don't do anything specific with it +func (w *highlevelWriter) ContentMetadata() content.Metadata { return w.writer.ContentMetadata() } diff --git a/pkg/frame/writer_delegate.go b/pkg/frame/writer_delegate.go new file mode 100644 index 00000000..fa968e97 --- /dev/null +++ b/pkg/frame/writer_delegate.go @@ -0,0 +1,58 @@ +package frame + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/content" +) + +func newDelegatingWriter(ct content.ContentType, w content.Writer) Writer { + return &delegatingWriter{ + // TODO: Register options? 
+ MetadataContainer: w.ContentMetadata().Clone().ToContainer(), + ContentTyped: ct, + w: w, + } +} + +// delegatingWriter is an implementation of the Writer interface +type delegatingWriter struct { + content.MetadataContainer + content.ContentTyped + w content.Writer +} + +func (w *delegatingWriter) WriteFrame(ctx context.Context, frame []byte) error { + // Write the frame to the underlying writer + n, err := w.w.WithContext(ctx).Write(frame) + // Guard against short writes + return catchShortWrite(n, err, frame) +} + +func (w *delegatingWriter) Close(ctx context.Context) error { return w.w.WithContext(ctx).Close() } + +func newErrWriter(ct content.ContentType, err error, meta content.Metadata) Writer { + return &errWriter{ + meta.Clone().ToContainer(), + ct, + &nopCloser{}, + err, + } +} + +type errWriter struct { + content.MetadataContainer + content.ContentTyped + Closer + err error +} + +func (w *errWriter) WriteFrame(context.Context, []byte) error { return w.err } + +func catchShortWrite(n int, err error, frame []byte) error { + if n < len(frame) && err == nil { + err = io.ErrShortWrite + } + return err +} diff --git a/pkg/frame/writer_factory.go b/pkg/frame/writer_factory.go new file mode 100644 index 00000000..1191648c --- /dev/null +++ b/pkg/frame/writer_factory.go @@ -0,0 +1,50 @@ +package frame + +import ( + "io" + + "github.com/weaveworks/libgitops/pkg/content" + "k8s.io/apimachinery/pkg/runtime/serializer/json" +) + +func (defaultFactory) NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer { + o := defaultWriterOptions().applyOptions(opts) + + var lowlevel Writer + switch ct { + case content.ContentTypeYAML: + lowlevel = newDelegatingWriter(content.ContentTypeYAML, w.Wrap(func(underlying io.WriteCloser) io.Writer { + // This writer always prepends a "---" before each frame + return json.YAMLFramer.NewFrameWriter(underlying) + })) + case content.ContentTypeJSON: + // JSON documents are self-framing; hence, no need to wrap the writer in any way + lowlevel = newDelegatingWriter(content.ContentTypeJSON, w) + default: + return newErrWriter(ct, content.ErrUnsupportedContentType(ct), w.ContentMetadata()) + } + return newHighlevelWriter(lowlevel, o) +} + +func (defaultFactory) NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer { + o := defaultSingleWriterOptions().applyOptions(opts) + + return newHighlevelWriter(newDelegatingWriter(ct, w), &writerOptions{ + Options: Options{ + SingleOptions: o.SingleOptions, + MaxFrameCount: 1, + }, + }) +} + +func (f defaultFactory) NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer { + o := defaultRecognizingWriterOptions().applyOptions(opts) + + // Recognize the content type using the given recognizer + r, ct, err := content.NewRecognizingWriter(w, o.Recognizer) + if err != nil { + return newErrWriter("", err, r.ContentMetadata()) + } + // Re-use the logic of the "main" Writer constructor; validate ct there + return f.NewWriter(ct, w, o) +} diff --git a/pkg/frame/writer_test.go b/pkg/frame/writer_test.go new file mode 100644 index 00000000..80281407 --- /dev/null +++ b/pkg/frame/writer_test.go @@ -0,0 +1,34 @@ +package frame + +import ( + "bytes" + "context" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaveworks/libgitops/pkg/content" +) + +func TestNewWriter_Unrecognized(t *testing.T) { + fr := DefaultFactory().NewWriter(content.ContentType("doesnotexist"), content.NewWriter(io.Discard)) + ctx := context.Background() + 
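	// Because the content type is unknown, the factory handed back an errWriter, so
	// every WriteFrame call should immediately surface the stored
	// UnsupportedContentTypeError.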
err := fr.WriteFrame(ctx, make([]byte, 1)) + assert.ErrorIs(t, err, &content.UnsupportedContentTypeError{}) +} + +func TestWriterShortBuffer(t *testing.T) { + var buf bytes.Buffer + w := &halfWriter{&buf} + ctx := context.Background() + err := NewYAMLWriter(content.NewWriter(w)).WriteFrame(ctx, []byte("foo: bar")) + assert.Equal(t, io.ErrShortWrite, err) +} + +type halfWriter struct { + w io.Writer +} + +func (w *halfWriter) Write(p []byte) (int, error) { + return w.w.Write(p[0 : (len(p)+1)/2]) +} diff --git a/pkg/serializer/error_structs.go b/pkg/serializer/error_structs.go deleted file mode 100644 index 11109b37..00000000 --- a/pkg/serializer/error_structs.go +++ /dev/null @@ -1,52 +0,0 @@ -package serializer - -var _ ReadCloser = &errReadCloser{} - -type errReadCloser struct { - err error -} - -func (rc *errReadCloser) Read(p []byte) (n int, err error) { - err = rc.err - return -} - -func (rc *errReadCloser) Close() error { - return nil -} - -var _ FrameReader = &errFrameReader{} - -type errFrameReader struct { - err error - contentType ContentType -} - -func (fr *errFrameReader) ReadFrame() ([]byte, error) { - return nil, fr.err -} - -func (fr *errFrameReader) ContentType() ContentType { - return fr.contentType -} - -// Close implements io.Closer and closes the underlying ReadCloser -func (fr *errFrameReader) Close() error { - return nil -} - -var _ FrameWriter = &errFrameWriter{} - -type errFrameWriter struct { - err error - contentType ContentType -} - -func (fw *errFrameWriter) Write(_ []byte) (n int, err error) { - err = fw.err - return -} - -func (fw *errFrameWriter) ContentType() ContentType { - return fw.contentType -} diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go deleted file mode 100644 index 26ead8d2..00000000 --- a/pkg/serializer/frame_reader.go +++ /dev/null @@ -1,168 +0,0 @@ -package serializer - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "os" - - "k8s.io/apimachinery/pkg/runtime/serializer/json" -) - -const ( - defaultBufSize = 64 * 1024 // 64 kB - defaultMaxFrameSize = 16 * 1024 * 1024 // 16 MB -) - -var ( - // FrameOverflowErr is returned from FrameReader.ReadFrame when one frame exceeds the - // maximum size of 16 MB. - FrameOverflowErr = errors.New("frame was larger than maximum allowed size") -) - -// ReadCloser in this package is an alias for io.ReadCloser. It helps in Godoc to locate -// helpers in this package which returns writers (i.e. FromFile and FromBytes) -type ReadCloser io.ReadCloser - -// FrameReader is a content-type specific reader of a given ReadCloser. -// The FrameReader reads frames from the underlying ReadCloser and returns them for consumption. -// When io.EOF is reached, the stream is closed automatically. -type FrameReader interface { - ContentTyped - io.Closer - - // ReadFrame reads frames from the underlying ReadCloser and returns them for consumption. - // When io.EOF is reached, the stream is closed automatically. - ReadFrame() ([]byte, error) -} - -// NewFrameReader returns a FrameReader for the given ContentType and data in the -// ReadCloser. The Reader is automatically closed in io.EOF. ReadFrame is called -// once each Decoder.Decode() or Decoder.DecodeInto() call. When Decoder.DecodeAll() is -// called, the FrameReader is read until io.EOF, upon where it is closed. 
-func NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader { - switch contentType { - case ContentTypeYAML: - return newFrameReader(json.YAMLFramer.NewFrameReader(rc), contentType) - case ContentTypeJSON: - return newFrameReader(json.Framer.NewFrameReader(rc), contentType) - default: - return &errFrameReader{ErrUnsupportedContentType, contentType} - } -} - -// NewYAMLFrameReader returns a FrameReader that supports both YAML and JSON. Frames are separated by "---\n" -// -// This call is the same as NewFrameReader(ContentTypeYAML, rc) -func NewYAMLFrameReader(rc ReadCloser) FrameReader { - return NewFrameReader(ContentTypeYAML, rc) -} - -// NewJSONFrameReader returns a FrameReader that supports both JSON. Objects are read from the stream one-by-one, -// each object making up its own frame. -// -// This call is the same as NewFrameReader(ContentTypeJSON, rc) -func NewJSONFrameReader(rc ReadCloser) FrameReader { - return NewFrameReader(ContentTypeJSON, rc) -} - -// newFrameReader returns a new instance of the frameReader struct -func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader { - return &frameReader{ - rc: rc, - bufSize: defaultBufSize, - maxFrameSize: defaultMaxFrameSize, - contentType: contentType, - } -} - -// frameReader is a FrameReader implementation -type frameReader struct { - rc io.ReadCloser - bufSize int - maxFrameSize int - contentType ContentType - - // TODO: Maybe add mutexes for thread-safety (so no two goroutines read at the same time) -} - -// ReadFrame reads one frame from the underlying io.Reader. ReadFrame -// keeps on reading from the Reader in bufSize blocks, until the Reader either -// returns err == nil or EOF. If the Reader reports an ErrShortBuffer error, -// ReadFrame keeps on reading using new calls. ReadFrame might return both data and -// io.EOF. io.EOF will be returned in the final call. -func (rf *frameReader) ReadFrame() (frame []byte, err error) { - // Temporary buffer to parts of a frame into - var buf []byte - // How many bytes were read by the read call - var n int - // Multiplier for bufsize - c := 1 - for { - // Allocate a buffer of a multiple of bufSize. - buf = make([]byte, c*rf.bufSize) - // Call the underlying reader. - n, err = rf.rc.Read(buf) - // Append the returned bytes to the b slice returned - // If n is 0, this call is a no-op - frame = append(frame, buf[:n]...) - - // If the frame got bigger than the max allowed size, return and report the error - if len(frame) > rf.maxFrameSize { - err = FrameOverflowErr - return - } - - // Handle different kinds of errors - switch err { - case io.ErrShortBuffer: - // ignore the "buffer too short" error, and just keep on reading, now doubling the buffer - c *= 2 - continue - case nil: - // One document is "done reading", we should return it if valid - // Only return non-empty documents, i.e. skip e.g. leading `---` - if len(bytes.TrimSpace(frame)) > 0 { - // valid non-empty document - return - } - // The document was empty, reset the frame (just to be sure) and continue - frame = nil - continue - case io.EOF: - // we reached the end of the file, close the reader and return - rf.rc.Close() - return - default: - // unknown error, return it immediately - // TODO: Maybe return the error here? 
- return - } - } -} - -// ContentType returns the content type for the given FrameReader -func (rf *frameReader) ContentType() ContentType { - return rf.contentType -} - -// Close implements io.Closer and closes the underlying ReadCloser -func (rf *frameReader) Close() error { - return rf.rc.Close() -} - -// FromFile returns a ReadCloser from the given file, or a ReadCloser which returns -// the given file open error when read. -func FromFile(filePath string) ReadCloser { - f, err := os.Open(filePath) - if err != nil { - return &errReadCloser{err} - } - return f -} - -// FromBytes returns a ReadCloser from the given byte content. -func FromBytes(content []byte) ReadCloser { - return ioutil.NopCloser(bytes.NewReader(content)) -} diff --git a/pkg/serializer/frame_reader_test.go b/pkg/serializer/frame_reader_test.go deleted file mode 100644 index a696ed7d..00000000 --- a/pkg/serializer/frame_reader_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package serializer - -import ( - "io" - "io/ioutil" - "reflect" - "strings" - "testing" - - "k8s.io/apimachinery/pkg/runtime/serializer/json" -) - -const ( - fooYAML = `kind: Foo -apiVersion: bar/v1 -a: b1234567890 -c: d1234567890 -e: f1234567890 -hello: true` - - barYAML = `kind: Bar -apiVersion: foo/v1 -a: b1234567890 -c: d1234567890 -e: f1234567890 -hello: false` - - bazYAML = `baz: true` - - testYAML = "\n---\n" + fooYAML + "\n---\n" + barYAML + "\n---\n" + bazYAML -) - -func Test_FrameReader_ReadFrame(t *testing.T) { - testYAMLReadCloser := json.YAMLFramer.NewFrameReader(ioutil.NopCloser(strings.NewReader(testYAML))) - - type fields struct { - rc io.ReadCloser - bufSize int - maxFrameSize int - } - type result struct { - wantB []byte - wantErr bool - } - tests := []struct { - name string - fields fields - wants []result - }{ - { - name: "three-document YAML case", - fields: fields{ - rc: testYAMLReadCloser, - bufSize: 16, - maxFrameSize: 1024, - }, - wants: []result{ - { - wantB: []byte(fooYAML), - wantErr: false, - }, - { - wantB: []byte(barYAML), - wantErr: false, - }, - { - wantB: []byte(bazYAML), - wantErr: false, - }, - { - wantB: nil, - wantErr: true, - }, - }, - }, - { - name: "maximum size reached", - fields: fields{ - rc: testYAMLReadCloser, - bufSize: 16, - maxFrameSize: 32, - }, - wants: []result{ - { - wantB: nil, - wantErr: true, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rf := &frameReader{ - rc: tt.fields.rc, - bufSize: tt.fields.bufSize, - maxFrameSize: tt.fields.maxFrameSize, - } - for _, expected := range tt.wants { - gotB, err := rf.ReadFrame() - if (err != nil) != expected.wantErr { - t.Errorf("frameReader.ReadFrame() error = %v, wantErr %v", err, expected.wantErr) - return - } - if len(gotB) < len(expected.wantB) { - t.Errorf("frameReader.ReadFrame(): got smaller slice %v than expected %v", gotB, expected.wantB) - return - } - if !reflect.DeepEqual(gotB[:len(expected.wantB)], expected.wantB) { - t.Errorf("frameReader.ReadFrame() = %v, want %v", gotB, expected.wantB) - } - } - }) - } -} diff --git a/pkg/serializer/frame_utils.go b/pkg/serializer/frame_utils.go deleted file mode 100644 index 12c65e16..00000000 --- a/pkg/serializer/frame_utils.go +++ /dev/null @@ -1,37 +0,0 @@ -package serializer - -import "io" - -// FrameList is a list of frames (byte arrays), used for convenience functions -type FrameList [][]byte - -// ReadFrameList is a convenience method that reads all available frames from the FrameReader -// into a returned FrameList -func ReadFrameList(fr FrameReader) (FrameList, 
error) { - // TODO: Create an unit test for this function - var frameList [][]byte - for { - // Read until we get io.EOF or an error - frame, err := fr.ReadFrame() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - // Append all frames to the returned list - frameList = append(frameList, frame) - } - return frameList, nil -} - -// WriteFrameList is a convenience method that writes a set of frames to a FrameWriter -func WriteFrameList(fw FrameWriter, frameList FrameList) error { - // TODO: Create an unit test for this function - // Loop all frames in the list, and write them individually to the FrameWriter - for _, frame := range frameList { - if _, err := fw.Write(frame); err != nil { - return err - } - } - return nil -} diff --git a/pkg/serializer/frame_writer.go b/pkg/serializer/frame_writer.go deleted file mode 100644 index d2f0fc45..00000000 --- a/pkg/serializer/frame_writer.go +++ /dev/null @@ -1,128 +0,0 @@ -package serializer - -import ( - "io" -) - -const ( - yamlSeparator = "---\n" -) - -// Writer in this package is an alias for io.Writer. It helps in Godoc to locate -// helpers in this package which returns writers (i.e. ToBytes) -type Writer io.Writer - -// FrameWriter is a ContentType-specific io.Writer that writes given frames in an applicable way -// to an underlying io.Writer stream -type FrameWriter interface { - ContentTyped - Writer -} - -// NewFrameWriter returns a new FrameWriter for the given Writer and ContentType -func NewFrameWriter(contentType ContentType, w Writer) FrameWriter { - switch contentType { - case ContentTypeYAML: - // Use our own implementation of the underlying YAML FrameWriter - return &frameWriter{newYAMLWriter(w), contentType} - case ContentTypeJSON: - // Comment from k8s.io/apimachinery/pkg/runtime/serializer/json.Framer.NewFrameWriter: - // "we can write JSON objects directly to the writer, because they are self-framing" - // Hence, we directly use w without any modifications. - return &frameWriter{w, contentType} - default: - return &errFrameWriter{ErrUnsupportedContentType, contentType} - } -} - -// NewYAMLFrameWriter returns a FrameWriter that writes YAML frames separated by "---\n" -// -// This call is the same as NewFrameWriter(ContentTypeYAML, w) -func NewYAMLFrameWriter(w Writer) FrameWriter { - return NewFrameWriter(ContentTypeYAML, w) -} - -// NewJSONFrameWriter returns a FrameWriter that writes JSON frames without separation -// (i.e. "{ ... }{ ... }{ ... 
}" on the wire) -// -// This call is the same as NewFrameWriter(ContentTypeYAML, w) -func NewJSONFrameWriter(w Writer) FrameWriter { - return NewFrameWriter(ContentTypeJSON, w) -} - -// frameWriter is an implementation of the FrameWriter interface -type frameWriter struct { - Writer - - contentType ContentType - - // TODO: Maybe add mutexes for thread-safety (so no two goroutines write at the same time) -} - -// ContentType returns the content type for the given FrameWriter -func (wf *frameWriter) ContentType() ContentType { - return wf.contentType -} - -// newYAMLWriter returns a new yamlWriter implementation -func newYAMLWriter(w Writer) *yamlWriter { - return &yamlWriter{ - w: w, - hasWritten: false, - } -} - -// yamlWriter writes yamlSeparator between documents -type yamlWriter struct { - w io.Writer - hasWritten bool -} - -// Write implements io.Writer -func (w *yamlWriter) Write(p []byte) (n int, err error) { - // If we've already written some documents, add the separator in between - if w.hasWritten { - _, err = w.w.Write([]byte(yamlSeparator)) - if err != nil { - return - } - } - - // Write the given bytes to the underlying writer - n, err = w.w.Write(p) - if err != nil { - return - } - - // Mark that we've now written once and should write the separator in between - w.hasWritten = true - return -} - -// ToBytes returns a Writer which can be passed to NewFrameWriter. The Writer writes directly -// to an underlying byte array. The byte array must be of enough length in order to write. -func ToBytes(p []byte) Writer { - return &byteWriter{p, 0} -} - -type byteWriter struct { - to []byte - // the next index to write to - index int -} - -func (w *byteWriter) Write(from []byte) (n int, err error) { - // Check if we have space in to, in order to write bytes there - if w.index+len(from) > len(w.to) { - err = io.ErrShortBuffer - return - } - // Copy over the bytes one by one - for i := range from { - w.to[w.index+i] = from[i] - } - // Increase the index for the next Write call's target position - w.index += len(from) - n += len(from) - return -} diff --git a/pkg/serializer/frame_writer_test.go b/pkg/serializer/frame_writer_test.go deleted file mode 100644 index 988dacbc..00000000 --- a/pkg/serializer/frame_writer_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package serializer - -import ( - "bytes" - "testing" -) - -func Test_byteWriter_Write(t *testing.T) { - type fields struct { - to []byte - index int - } - type args struct { - from []byte - } - tests := []struct { - name string - fields fields - args args - wantN int - wantErr bool - }{ - { - name: "simple case", - fields: fields{ - to: make([]byte, 50), - }, - args: args{ - from: []byte("Hello!\nFoobar"), - }, - wantN: 13, - wantErr: false, - }, - { - name: "target too short", - fields: fields{ - to: make([]byte, 10), - }, - args: args{ - from: []byte("Hello!\nFoobar"), - }, - wantN: 0, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - w := &byteWriter{ - to: tt.fields.to, - index: tt.fields.index, - } - gotN, err := w.Write(tt.args.from) - if (err != nil) != tt.wantErr { - t.Errorf("byteWriter.Write() error = %v, wantErr %v", err, tt.wantErr) - return - } - if gotN != tt.wantN { - t.Errorf("byteWriter.Write() = %v, want %v", gotN, tt.wantN) - return - } - if !tt.wantErr && !bytes.Equal(tt.fields.to[:gotN], tt.args.from) { - t.Errorf("byteWriter.Write(): expected fields.to (%s) to equal args.from (%s), but didn't", tt.fields.to[:gotN], tt.args.from) - } - }) - } -} From 
78ef1c1c3358f6ef132dc64cfd6708219d486ee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:35:25 +0300 Subject: [PATCH 10/19] Update the serializer package to use the new framing code. --- pkg/serializer/comments.go | 31 ++++--- pkg/serializer/comments_test.go | 2 +- pkg/serializer/decode.go | 33 ++++--- pkg/serializer/encode.go | 26 +++--- pkg/serializer/serializer.go | 54 ++++-------- pkg/serializer/serializer_test.go | 138 ++++++++++++++++-------------- 6 files changed, 143 insertions(+), 141 deletions(-) diff --git a/pkg/serializer/comments.go b/pkg/serializer/comments.go index 302c4db0..397a1bc3 100644 --- a/pkg/serializer/comments.go +++ b/pkg/serializer/comments.go @@ -2,12 +2,15 @@ package serializer import ( "bytes" + "context" "encoding/base64" "errors" "fmt" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/serializer/comments" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" + "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/kustomize/kyaml/yaml" @@ -24,10 +27,10 @@ var ( // tryToPreserveComments tries to save the original file data (base64-encoded) into an annotation. // This original file data can be used at encoding-time to preserve comments -func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct ContentType) { +func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct content.ContentType) { // If the user opted into preserving comments and the format is YAML, proceed // If they didn't, return directly - if !(*d.opts.PreserveComments && ct == ContentTypeYAML) { + if !(*d.opts.PreserveComments && ct == content.ContentTypeYAML) { return } @@ -39,7 +42,7 @@ func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct Conte } // tryToPreserveComments tries to locate the possibly-saved original file data in the object's annotation -func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object, metaObj metav1.Object) error { +func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw frame.Writer, obj runtime.Object, metaObj metav1.Object) error { // If the user did not opt into preserving comments, just sanitize ObjectMeta temporarily and and return if !*e.opts.PreserveComments { // Normal encoding without the annotation (so it doesn't leak by accident) @@ -47,8 +50,8 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr } // The user requested to preserve comments, but content type is not YAML, so log, sanitize and return - if fw.ContentType() != ContentTypeYAML { - logrus.Debugf("Asked to preserve comments, but ContentType is not YAML, so ignoring") + if fw.ContentType() != content.ContentTypeYAML { + logrus.Debugf("Asked to preserve comments, but content.ContentType is not YAML, so ignoring") // Normal encoding without the annotation (so it doesn't leak by accident) return noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, fw, obj)) @@ -62,9 +65,9 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr return err } - // Encode the new object into a temporary buffer, it should not be written as the "final result" to the FrameWriter - buf := new(bytes.Buffer) - if err := noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, NewYAMLFrameWriter(buf), obj)); err != nil { + 
// Encode the new object into a temporary buffer, it should not be written as the "final result" to the frame.Writer + var buf bytes.Buffer + if err := noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, frame.ToYAMLBuffer(&buf), obj)); err != nil { // fatal error return err } @@ -83,15 +86,17 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr return err } - // Print the new schema with the old comments kept to the FrameWriter - _, err = fmt.Fprint(fw, afterNode.MustString()) + // Print the new schema with the old comments kept to the frame.Writer + ctx := context.TODO() + _, err = fmt.Fprint(frame.ToIoWriteCloser(ctx, fw), afterNode.MustString()) // we're done, exit the encode function return err } -func (e *encoder) normalEncodeFunc(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object) func() error { +func (e *encoder) normalEncodeFunc(versionEncoder runtime.Encoder, fw frame.Writer, obj runtime.Object) func() error { return func() error { - return versionEncoder.Encode(obj, fw) + ctx := context.TODO() + return versionEncoder.Encode(obj, frame.ToIoWriteCloser(ctx, fw)) } } diff --git a/pkg/serializer/comments_test.go b/pkg/serializer/comments_test.go index 8f4c65c2..3f15a02f 100644 --- a/pkg/serializer/comments_test.go +++ b/pkg/serializer/comments_test.go @@ -37,7 +37,7 @@ spec: var: true status: nested: - fields: + fields: {} # Just a comment ` diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index 4feff21f..c2e1a85e 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -1,10 +1,13 @@ package serializer import ( + "context" "fmt" "io" "reflect" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -38,7 +41,7 @@ type DecodingOptions struct { DecodeListElements *bool // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. - // Only applicable to ContentTypeYAML framers. + // Only applicable to content.ContentTypeYAML framers. // Using any other framer will be silently ignored. Usage of this option also requires setting // the PreserveComments in EncodingOptions, too. (Default: false) PreserveComments *bool @@ -120,7 +123,7 @@ type decoder struct { opts DecodingOptions } -// Decode returns the decoded object from the next document in the FrameReader stream. +// Decode returns the decoded object from the next document in the frame.Reader stream. // If there are multiple documents in the underlying stream, this call will read one // document and return it. Decode might be invoked for getting new documents until it // returns io.EOF. When io.EOF is reached in a call, the stream is automatically closed. @@ -136,17 +139,18 @@ type decoder struct { // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. // opts.DecodeListElements is not applicable in this call. 
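+//
+// A minimal usage sketch (the `data` byte slice and `s` Serializer here are illustrative
+// placeholders, not part of this package):
+//
+//	fr := frame.NewYAMLReader(content.FromBytes(data))
+//	obj, err := s.Decoder().Decode(fr)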
-func (d *decoder) Decode(fr FrameReader) (runtime.Object, error) { - // Read a frame from the FrameReader +func (d *decoder) Decode(fr frame.Reader) (runtime.Object, error) { + // Read a frame from the frame.Reader // TODO: Make sure to test the case when doc might contain something, and err is io.EOF - doc, err := fr.ReadFrame() + ctx := context.TODO() + doc, err := fr.ReadFrame(ctx) if err != nil { return nil, err } return d.decode(doc, nil, fr.ContentType()) } -func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runtime.Object, error) { +func (d *decoder) decode(doc []byte, into runtime.Object, ct content.ContentType) (runtime.Object, error) { // If the scheme doesn't recognize a v1.List, and we enabled opts.DecodeListElements, // make the scheme able to decode the v1.List automatically if *d.opts.DecodeListElements && !d.scheme.Recognizes(listGVK) { @@ -190,7 +194,7 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti return obj, nil } -// DecodeInto decodes the next document in the FrameReader stream into obj if the types are matching. +// DecodeInto decodes the next document in the frame.Reader stream into obj if the types are matching. // If there are multiple documents in the underlying stream, this call will read one // document and return it. Decode might be invoked for getting new documents until it // returns io.EOF. When io.EOF is reached in a call, the stream is automatically closed. @@ -207,10 +211,11 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti // opts.DecodeUnknown is not applicable in this call. In case you want to decode an object into a // *runtime.Unknown, just create a runtime.Unknown object and pass the pointer as obj into DecodeInto // and it'll work. -func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error { - // Read a frame from the FrameReader. +func (d *decoder) DecodeInto(fr frame.Reader, into runtime.Object) error { + // Read a frame from the frame.Reader. // TODO: Make sure to test the case when doc might contain something, and err is io.EOF - doc, err := fr.ReadFrame() + ctx := context.TODO() + doc, err := fr.ReadFrame(ctx) if err != nil { return err } @@ -220,7 +225,7 @@ func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error { return err } -// DecodeAll returns the decoded objects from all documents in the FrameReader stream. The underlying +// DecodeAll returns the decoded objects from all documents in the frame.Reader stream. The underlying // stream is automatically closed on io.EOF. io.EOF is never returned from this function. // If any decoded object is for an unrecognized group, or version, UnrecognizedGroupError // or UnrecognizedVersionError might be returned. @@ -235,7 +240,7 @@ func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error { // added into the returning slice. The v1.List will in this case not be returned. // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. 
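+//
+// A minimal usage sketch (`data` and `s` are illustrative placeholders): read every document
+// in a YAML stream, splitting v1.List items into their own objects:
+//
+//	fr := frame.NewYAMLReader(content.FromBytes(data))
+//	objs, err := s.Decoder(WithListElementsDecoding(true)).DecodeAll(fr)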
-func (d *decoder) DecodeAll(fr FrameReader) ([]runtime.Object, error) { +func (d *decoder) DecodeAll(fr frame.Reader) ([]runtime.Object, error) { objs := []runtime.Object{} for { obj, err := d.Decode(fr) @@ -258,7 +263,7 @@ func (d *decoder) DecodeAll(fr FrameReader) ([]runtime.Object, error) { } // decodeUnknown decodes bytes of a certain content type into a returned *runtime.Unknown object -func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, error) { +func (d *decoder) decodeUnknown(doc []byte, ct content.ContentType) (runtime.Object, error) { // Do a DecodeInto the new pointer to the object we've got. The resulting into object is // also returned. // The content type isn't really used here, as runtime.Unknown will never implement @@ -295,7 +300,7 @@ func (d *decoder) handleDecodeError(doc []byte, origErr error) error { return origErr } -func (d *decoder) extractNestedObjects(obj runtime.Object, ct ContentType) ([]runtime.Object, error) { +func (d *decoder) extractNestedObjects(obj runtime.Object, ct content.ContentType) ([]runtime.Object, error) { // If we didn't ask for list-unwrapping functionality, return directly if !*d.opts.DecodeListElements { return []runtime.Object{obj}, nil diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index 77061932..5e01efee 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -1,7 +1,11 @@ package serializer import ( + "context" + "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/util" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -12,7 +16,7 @@ type EncodingOptions struct { // TODO: Fix that sometimes omitempty fields aren't respected Pretty *bool // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. - // Only applicable to ContentTypeYAML framers. + // Only applicable to content.ContentTypeYAML framers. // Using any other framer will be silently ignored. Usage of this option also requires setting // the PreserveComments in DecodingOptions, too. (Default: false) // TODO: Make this a BestEffort & Strict mode @@ -70,12 +74,12 @@ func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder { } } -// Encode encodes the given objects and writes them to the specified FrameWriter. -// The FrameWriter specifies the ContentType. This encoder will automatically convert any +// Encode encodes the given objects and writes them to the specified frame.Writer. +// The frame.Writer specifies the content.ContentType. This encoder will automatically convert any // internal object given to the preferred external groupversion. No conversion will happen // if the given object is of an external version. // TODO: This should automatically convert to the preferred version -func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { +func (e *encoder) Encode(fw frame.Writer, objs ...runtime.Object) error { for _, obj := range objs { // Get the kind for the given object gvk, err := GVKForObject(e.scheme, obj) @@ -102,12 +106,13 @@ func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { // EncodeForGroupVersion encodes the given object for the specific groupversion. If the object // is not of that version currently it will try to convert. The output bytes are written to the -// FrameWriter. The FrameWriter specifies the ContentType. 
-func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error { +// frame.Writer. The frame.Writer specifies the content.ContentType. +func (e *encoder) EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error { // Get the serializer for the media type - serializerInfo, ok := runtime.SerializerInfoForMediaType(e.codecs.SupportedMediaTypes(), string(fw.ContentType())) + serializerInfo, ok := runtime.SerializerInfoForMediaType(e.codecs.SupportedMediaTypes(), fw.ContentType().String()) if !ok { - return ErrUnsupportedContentType + // TODO: Also mention what content types _are_ supported here + return content.ErrUnsupportedContentType(fw.ContentType()) } // Choose the pretty or non-pretty one @@ -120,7 +125,7 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s if serializerInfo.PrettySerializer != nil { encoder = serializerInfo.PrettySerializer } else { - logrus.Debugf("PrettySerializer for ContentType %s is nil, falling back to Serializer.", fw.ContentType()) + logrus.Debugf("PrettySerializer for content.ContentType %s is nil, falling back to Serializer.", fw.ContentType()) } } @@ -131,7 +136,8 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s metaobj, ok := toMetaObject(obj) // For objects without ObjectMeta, the cast will fail. Allow that failure and do "normal" encoding if !ok { - return versionEncoder.Encode(obj, fw) + ctx := context.TODO() + return versionEncoder.Encode(obj, frame.ToIoWriteCloser(ctx, fw)) } // Specialize the encoder for a specific gv and encode the object diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index eb798c91..de4be7ec 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -1,47 +1,25 @@ package serializer import ( - "errors" "fmt" + "github.com/weaveworks/libgitops/pkg/frame" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer" ) -// ContentType specifies a content type for Encoders, Decoders, FrameWriters and FrameReaders -type ContentType string - -const ( - // ContentTypeJSON specifies usage of JSON as the content type. - // It is an alias for k8s.io/apimachinery/pkg/runtime.ContentTypeJSON - ContentTypeJSON = ContentType(runtime.ContentTypeJSON) - - // ContentTypeYAML specifies usage of YAML as the content type. - // It is an alias for k8s.io/apimachinery/pkg/runtime.ContentTypeYAML - ContentTypeYAML = ContentType(runtime.ContentTypeYAML) -) - -// ErrUnsupportedContentType is returned if the specified content type isn't supported -var ErrUnsupportedContentType = errors.New("unsupported content type") - -// ContentTyped is an interface for objects that are specific to a set ContentType. -type ContentTyped interface { - // ContentType returns the ContentType (usually ContentTypeYAML or ContentTypeJSON) for the given object. - ContentType() ContentType -} - // Serializer is an interface providing high-level decoding/encoding functionality // for types registered in a *runtime.Scheme type Serializer interface { // Decoder is a high-level interface for decoding Kubernetes API Machinery objects read from - // a FrameWriter. The decoder can be customized by passing some options (e.g. WithDecodingOptions) + // a frame.Writer. The decoder can be customized by passing some options (e.g. WithDecodingOptions) // to this call. 
// The decoder supports both "classic" API Machinery objects and controller-runtime CRDs Decoder(optsFn ...DecodingOptionsFunc) Decoder // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them - // to a FrameWriter. The encoder can be customized by passing some options (e.g. WithEncodingOptions) + // to a frame.Writer. The encoder can be customized by passing some options (e.g. WithEncodingOptions) // to this call. // The encoder supports both "classic" API Machinery objects and controller-runtime CRDs Encoder(optsFn ...EncodingOptionsFunc) Encoder @@ -68,25 +46,25 @@ type schemeAndCodec struct { } // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them -// to a FrameWriter. +// to a frame.Writer. type Encoder interface { - // Encode encodes the given objects and writes them to the specified FrameWriter. - // The FrameWriter specifies the ContentType. This encoder will automatically convert any + // Encode encodes the given objects and writes them to the specified frame.Writer. + // The frame.Writer specifies the content.ContentType. This encoder will automatically convert any // internal object given to the preferred external groupversion. No conversion will happen // if the given object is of an external version. - Encode(fw FrameWriter, obj ...runtime.Object) error + Encode(fw frame.Writer, obj ...runtime.Object) error // EncodeForGroupVersion encodes the given object for the specific groupversion. If the object // is not of that version currently it will try to convert. The output bytes are written to the - // FrameWriter. The FrameWriter specifies the ContentType. - EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error + // frame.Writer. The frame.Writer specifies the content.ContentType. + EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error } // Decoder is a high-level interface for decoding Kubernetes API Machinery objects read from -// a FrameWriter. The decoder can be customized by passing some options (e.g. WithDecodingOptions) +// a frame.Writer. The decoder can be customized by passing some options (e.g. WithDecodingOptions) // to this call. type Decoder interface { - // Decode returns the decoded object from the next document in the FrameReader stream. + // Decode returns the decoded object from the next document in the frame.Reader stream. // If there are multiple documents in the underlying stream, this call will read one // document and return it. Decode might be invoked for getting new documents until it // returns io.EOF. When io.EOF is reached in a call, the stream is automatically closed. @@ -101,9 +79,9 @@ type Decoder interface { // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. // opts.DecodeListElements is not applicable in this call. - Decode(fr FrameReader) (runtime.Object, error) + Decode(fr frame.Reader) (runtime.Object, error) - // DecodeInto decodes the next document in the FrameReader stream into obj if the types are matching. + // DecodeInto decodes the next document in the frame.Reader stream into obj if the types are matching. // If there are multiple documents in the underlying stream, this call will read one // document and return it. Decode might be invoked for getting new documents until it // returns io.EOF. 
When io.EOF is reached in a call, the stream is automatically closed. @@ -120,9 +98,9 @@ type Decoder interface { // opts.DecodeUnknown is not applicable in this call. In case you want to decode an object into a // *runtime.Unknown, just create a runtime.Unknown object and pass the pointer as obj into DecodeInto // and it'll work. - DecodeInto(fr FrameReader, obj runtime.Object) error + DecodeInto(fr frame.Reader, obj runtime.Object) error - // DecodeAll returns the decoded objects from all documents in the FrameReader stream. The underlying + // DecodeAll returns the decoded objects from all documents in the frame.Reader stream. The underlying // stream is automatically closed on io.EOF. io.EOF is never returned from this function. // If any decoded object is for an unrecognized group, or version, UnrecognizedGroupError // or UnrecognizedVersionError might be returned. @@ -137,7 +115,7 @@ type Decoder interface { // added into the returning slice. The v1.List will in this case not be returned. // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. - DecodeAll(fr FrameReader) ([]runtime.Object, error) + DecodeAll(fr frame.Reader) ([]runtime.Object, error) } // Converter is an interface that allows access to object conversion capabilities diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go index ba239855..c5f6193c 100644 --- a/pkg/serializer/serializer_test.go +++ b/pkg/serializer/serializer_test.go @@ -7,6 +7,9 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" @@ -30,9 +33,10 @@ var ( ext1gv = schema.GroupVersion{Group: groupname, Version: "v1alpha1"} ext2gv = schema.GroupVersion{Group: groupname, Version: "v1alpha2"} - intsb = runtime.NewSchemeBuilder(addInternalTypes) - ext1sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext1gv), v1_addDefaultingFuncs, registerOldCRD) - ext2sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext2gv), v2_addDefaultingFuncs, registerNewCRD) + intsb = runtime.NewSchemeBuilder(addInternalTypes) + ext1sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext1gv), v1_addDefaultingFuncs, registerOldCRD) + ext2sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext2gv), v2_addDefaultingFuncs, registerNewCRD) + yamlSep = []byte("---\n") ) func v1_addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -251,38 +255,45 @@ var ( newCRDMeta = metav1.TypeMeta{APIVersion: "foogroup/v1alpha2", Kind: "CRD"} unknownMeta = runtime.TypeMeta{APIVersion: "unknown/v1", Kind: "YouDontRecognizeMe"} - oneSimple = []byte(`apiVersion: foogroup/v1alpha1 + oneSimple = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: Simple testString: foo `) - simpleUnknownField = []byte(`apiVersion: foogroup/v1alpha1 + simpleUnknownField = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: Simple testString: foo unknownField: bar `) - simpleDuplicateField = []byte(`apiVersion: foogroup/v1alpha1 + simpleDuplicateField = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: Simple testString: foo testString: bar `) - unrecognizedVersion = []byte(`apiVersion: foogroup/v1alpha0 + unrecognizedVersion = []byte(`--- +apiVersion: foogroup/v1alpha0 kind: Simple testString: foo 
`) - unrecognizedGVK = []byte(`apiVersion: unknown/v1 + unrecognizedGVK = []byte(`--- +apiVersion: unknown/v1 kind: YouDontRecognizeMe testFooBar: true `) - oneComplex = []byte(`Int64: 0 + oneComplex = []byte(`--- +Int64: 0 apiVersion: foogroup/v1alpha1 bool: false int: 0 kind: Complex string: bar `) - simpleAndComplex = []byte(string(oneSimple) + "---\n" + string(oneComplex)) + simpleAndComplex = []byte(string(oneSimple) + string(oneComplex)) - testList = []byte(`apiVersion: v1 + testList = []byte(`--- +apiVersion: v1 kind: List items: - apiVersion: foogroup/v1alpha1 @@ -303,7 +314,8 @@ items: complexJSON = []byte(`{"apiVersion":"foogroup/v1alpha1","kind":"Complex","string":"bar","int":0,"Int64":0,"bool":false} `) - oldCRD = []byte(`# I'm a top comment + oldCRD = []byte(`--- +# I'm a top comment apiVersion: foogroup/v1alpha1 kind: CRD metadata: @@ -312,14 +324,16 @@ metadata: testString: foobar # Me too `) - oldCRDNoComments = []byte(`apiVersion: foogroup/v1alpha1 + oldCRDNoComments = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: CRD metadata: creationTimestamp: null testString: foobar `) - newCRD = []byte(`# I'm a top comment + newCRD = []byte(`--- +# I'm a top comment apiVersion: foogroup/v1alpha2 kind: CRD metadata: @@ -328,7 +342,8 @@ metadata: otherString: foobar # Me too `) - newCRDNoComments = []byte(`apiVersion: foogroup/v1alpha2 + newCRDNoComments = []byte(`--- +apiVersion: foogroup/v1alpha2 kind: CRD metadata: creationTimestamp: null @@ -342,34 +357,30 @@ func TestEncode(t *testing.T) { oldCRDObj := &CRDOldVersion{TestString: "foobar"} newCRDObj := &CRDNewVersion{OtherString: "foobar"} tests := []struct { - name string - ct ContentType - objs []runtime.Object - expected []byte - expectedErr bool + name string + ct content.ContentType + objs []runtime.Object + want []byte + wantErr error }{ - {"simple yaml", ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, false}, - {"complex yaml", ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, false}, - {"both simple and complex yaml", ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, false}, - {"simple json", ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, false}, - {"complex json", ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, false}, - {"old CRD yaml", ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, false}, - {"new CRD yaml", ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, false}, + {"simple yaml", content.ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, nil}, + {"complex yaml", content.ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, nil}, + {"both simple and complex yaml", content.ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, nil}, + {"simple json", content.ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, nil}, + {"complex json", content.ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, nil}, + {"old CRD yaml", content.ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, nil}, + {"new CRD yaml", content.ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, nil}, //{"no-conversion simple", defaultEncoder, &runtimetest.ExternalSimple{TestString: "foo"}, simpleJSON, false}, //{"support internal", defaultEncoder, []runtime.Object{simpleObj}, []byte(`{"testString":"foo"}` + "\n"), false}, } for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { - buf := new(bytes.Buffer) - actualErr := defaultEncoder.Encode(NewFrameWriter(rt.ct, buf), rt.objs...) 
- actual := buf.Bytes() - if (actualErr != nil) != rt.expectedErr { - t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actualErr != nil, actualErr) - } - if !bytes.Equal(actual, rt.expected) { - t2.Errorf("expected %q but actual %q", string(rt.expected), string(actual)) - } + var buf bytes.Buffer + cw := content.ToBuffer(&buf, content.WithContentType(rt.ct)) + err := defaultEncoder.Encode(frame.NewRecognizingWriter(cw), rt.objs...) + assert.ErrorIs(t, err, rt.wantErr) + assert.Equal(t, string(rt.want), buf.String()) }) } } @@ -381,8 +392,8 @@ func TestDecode(t *testing.T) { data []byte doDefaulting bool doConversion bool - expected runtime.Object - expectedErr bool + want runtime.Object + wantErr bool }{ {"old CRD hub conversion", oldCRD, false, true, &CRDNewVersion{newCRDMeta, metav1.ObjectMeta{}, "Old string foobar"}, false}, {"old CRD no conversion", oldCRD, false, false, &CRDOldVersion{oldCRDMeta, metav1.ObjectMeta{}, "foobar"}, false}, @@ -401,16 +412,12 @@ func TestDecode(t *testing.T) { for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { - obj, actual := ourserializer.Decoder( + obj, err := ourserializer.Decoder( WithDefaultsDecode(rt.doDefaulting), WithConvertToHubDecode(rt.doConversion), - ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) - if (actual != nil) != rt.expectedErr { - t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) - } - if rt.expected != nil && !reflect.DeepEqual(obj, rt.expected) { - t2.Errorf("expected %#v but actual %#v", rt.expected, obj) - } + ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) + assert.Equal(t, err != nil, rt.wantErr) + assert.Equal(t, rt.want, obj) }) } } @@ -433,8 +440,8 @@ func TestDecodeInto(t *testing.T) { {"complex external", oneComplex, false, &runtimetest.ExternalComplex{}, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar"}, false}, {"defaulted complex external", oneComplex, true, &runtimetest.ExternalComplex{}, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar", Integer64: 5}, false}, {"defaulted complex internal", oneComplex, true, &runtimetest.InternalComplex{}, &runtimetest.InternalComplex{String: "bar", Integer64: 5}, false}, - {"decode unknown obj into unknown", unrecognizedGVK, false, &runtime.Unknown{}, newUnknown(unknownMeta, unrecognizedGVK), false}, - {"decode known obj into unknown", oneComplex, false, &runtime.Unknown{}, newUnknown(complexv1Meta, oneComplex), false}, + {"decode unknown obj into unknown", unrecognizedGVK, false, &runtime.Unknown{}, newUnknown(unknownMeta, bytes.TrimPrefix(unrecognizedGVK, yamlSep)), false}, + {"decode known obj into unknown", oneComplex, false, &runtime.Unknown{}, newUnknown(complexv1Meta, bytes.TrimPrefix(oneComplex, yamlSep)), false}, {"no unknown fields", simpleUnknownField, false, &runtimetest.InternalSimple{}, nil, true}, {"no duplicate fields", simpleDuplicateField, false, &runtimetest.InternalSimple{}, nil, true}, {"no unrecognized API version", unrecognizedVersion, false, &runtimetest.InternalSimple{}, nil, true}, @@ -445,7 +452,7 @@ func TestDecodeInto(t *testing.T) { actual := ourserializer.Decoder( WithDefaultsDecode(rt.doDefaulting), - ).DecodeInto(NewYAMLFrameReader(FromBytes(rt.data)), rt.obj) + ).DecodeInto(frame.NewYAMLReader(content.FromBytes(rt.data)), rt.obj) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -486,7 +493,7 @@ func TestDecodeAll(t *testing.T) { objs, actual := 
ourserializer.Decoder( WithDefaultsDecode(rt.doDefaulting), WithListElementsDecoding(rt.listSplit), - ).DecodeAll(NewYAMLFrameReader(FromBytes(rt.data))) + ).DecodeAll(frame.NewYAMLReader(content.FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -519,7 +526,7 @@ func TestDecodeUnknown(t *testing.T) { expected runtime.Object expectedErr bool }{ - {"Decode unrecognized kinds into runtime.Unknown", unrecognizedGVK, true, newUnknown(unknownMeta, unrecognizedGVK), false}, + {"Decode unrecognized kinds into runtime.Unknown", unrecognizedGVK, true, newUnknown(unknownMeta, bytes.TrimPrefix(unrecognizedGVK, yamlSep)), false}, {"Decode known kinds into known structs", oneComplex, true, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar"}, false}, {"No support for unrecognized", unrecognizedGVK, false, nil, true}, } @@ -528,7 +535,7 @@ func TestDecodeUnknown(t *testing.T) { t.Run(rt.name, func(t2 *testing.T) { obj, actual := ourserializer.Decoder( WithUnknownDecode(rt.unknown), - ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) + ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -543,15 +550,15 @@ func TestRoundtrip(t *testing.T) { tests := []struct { name string data []byte - ct ContentType + ct content.ContentType gv *schema.GroupVersion // use a specific groupversion if set. if nil, then use the default Encode }{ - {"simple yaml", oneSimple, ContentTypeYAML, nil}, - {"complex yaml", oneComplex, ContentTypeYAML, nil}, - {"simple json", simpleJSON, ContentTypeJSON, nil}, - {"complex json", complexJSON, ContentTypeJSON, nil}, - {"crd with objectmeta & comments", oldCRD, ContentTypeYAML, &ext1gv}, // encode as v1alpha1 - {"unknown object", unrecognizedGVK, ContentTypeYAML, nil}, + {"simple yaml", oneSimple, content.ContentTypeYAML, nil}, + {"complex yaml", oneComplex, content.ContentTypeYAML, nil}, + {"simple json", simpleJSON, content.ContentTypeJSON, nil}, + {"complex json", complexJSON, content.ContentTypeJSON, nil}, + {"crd with objectmeta & comments", oldCRD, content.ContentTypeYAML, &ext1gv}, // encode as v1alpha1 + {"unknown object", unrecognizedGVK, content.ContentTypeYAML, nil}, // TODO: Maybe an unit test (case) for a type with ObjectMeta embedded as a pointer being nil // TODO: Make sure that the Encode call (with comments support) doesn't mutate the object state // i.e. doesn't remove the annotation after use so multiple similar encode calls work. 
@@ -563,16 +570,17 @@ func TestRoundtrip(t *testing.T) { WithConvertToHubDecode(true), WithCommentsDecode(true), WithUnknownDecode(true), - ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) + ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) if err != nil { t2.Errorf("unexpected decode error: %v", err) return } - buf := new(bytes.Buffer) + var buf bytes.Buffer + cw := content.ToBuffer(&buf, content.WithContentType(rt.ct)) if rt.gv == nil { - err = defaultEncoder.Encode(NewFrameWriter(rt.ct, buf), obj) + err = defaultEncoder.Encode(frame.NewRecognizingWriter(cw), obj) } else { - err = defaultEncoder.EncodeForGroupVersion(NewFrameWriter(rt.ct, buf), obj, *rt.gv) + err = defaultEncoder.EncodeForGroupVersion(frame.NewRecognizingWriter(cw), obj, *rt.gv) } actual := buf.Bytes() if err != nil { @@ -684,13 +692,13 @@ testString: bar func TestListRoundtrip(t *testing.T) { objs, err := ourserializer.Decoder( WithCommentsDecode(true), - ).DecodeAll(NewYAMLFrameReader(FromBytes(testList))) + ).DecodeAll(frame.NewYAMLReader(content.FromBytes(testList))) if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) - if err := defaultEncoder.Encode(NewFrameWriter(ContentTypeYAML, buf), objs...); err != nil { + if err := defaultEncoder.Encode(frame.NewWriter(content.ContentTypeYAML, buf), objs...); err != nil { t.Fatal(err) } actual := buf.Bytes() From b8031ed4282d093edda3dcb207e2e70d1b8e9fac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:36:16 +0300 Subject: [PATCH 11/19] Update the rest of the codebase to use the new packages --- cmd/sample-app/main.go | 11 ++++++----- cmd/sample-watch/main.go | 9 +++++---- pkg/storage/format.go | 12 ++++++------ pkg/storage/mappedrawstorage.go | 4 ++-- pkg/storage/rawstorage.go | 10 +++++----- pkg/storage/storage.go | 30 ++++++++++++++++++------------ pkg/storage/transaction/git.go | 3 ++- pkg/util/patch/patch.go | 10 ++++++---- pkg/util/patch/patch_test.go | 5 +++-- 9 files changed, 53 insertions(+), 41 deletions(-) diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go index ea119a95..d0f9ae50 100644 --- a/cmd/sample-app/main.go +++ b/cmd/sample-app/main.go @@ -12,9 +12,10 @@ import ( "github.com/weaveworks/libgitops/cmd/common" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/logs" "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage" ) @@ -41,7 +42,7 @@ func run() error { logs.Logger.SetLevel(logrus.InfoLevel) plainStorage := storage.NewGenericStorage( - storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, serializer.ContentTypeYAML), + storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, content.ContentTypeYAML), scheme.Serializer, []runtime.IdentifierFactory{runtime.Metav1NameIdentifier}, ) @@ -59,11 +60,11 @@ func run() error { if err != nil { return err } - var content bytes.Buffer - if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil { + var buf bytes.Buffer + if err := scheme.Serializer.Encoder().Encode(frame.NewJSONWriter(content.ToBuffer(&buf)), obj); err != nil { return err } - return c.JSONBlob(http.StatusOK, content.Bytes()) + return c.JSONBlob(http.StatusOK, buf.Bytes()) }) e.POST("/plain/:name", 
func(c echo.Context) error { diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go index ef1aec0a..2abb1524 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -11,8 +11,9 @@ import ( "github.com/spf13/pflag" "github.com/weaveworks/libgitops/cmd/common" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/logs" - "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/watch" "github.com/weaveworks/libgitops/pkg/storage/watch/update" ) @@ -66,11 +67,11 @@ func run() error { if err != nil { return err } - var content bytes.Buffer - if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil { + var buf bytes.Buffer + if err := scheme.Serializer.Encoder().Encode(frame.NewJSONWriter(content.ToBuffer(&buf)), obj); err != nil { return err } - return c.JSONBlob(http.StatusOK, content.Bytes()) + return c.JSONBlob(http.StatusOK, buf.Bytes()) }) e.PUT("/watch/:name", func(c echo.Context) error { diff --git a/pkg/storage/format.go b/pkg/storage/format.go index 84993ceb..e2bdb2c1 100644 --- a/pkg/storage/format.go +++ b/pkg/storage/format.go @@ -1,16 +1,16 @@ package storage -import "github.com/weaveworks/libgitops/pkg/serializer" +import "github.com/weaveworks/libgitops/pkg/content" // ContentTypes describes the connection between // file extensions and a content types. -var ContentTypes = map[string]serializer.ContentType{ - ".json": serializer.ContentTypeJSON, - ".yaml": serializer.ContentTypeYAML, - ".yml": serializer.ContentTypeYAML, +var ContentTypes = map[string]content.ContentType{ + ".json": content.ContentTypeJSON, + ".yaml": content.ContentTypeYAML, + ".yml": content.ContentTypeYAML, } -func extForContentType(wanted serializer.ContentType) string { +func extForContentType(wanted content.ContentType) string { for ext, ct := range ContentTypes { if ct == wanted { return ext diff --git a/pkg/storage/mappedrawstorage.go b/pkg/storage/mappedrawstorage.go index d41641ce..633da8ef 100644 --- a/pkg/storage/mappedrawstorage.go +++ b/pkg/storage/mappedrawstorage.go @@ -8,7 +8,7 @@ import ( "sync" log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/util" ) @@ -133,7 +133,7 @@ func (r *GenericMappedRawStorage) Checksum(key ObjectKey) (string, error) { return checksumFromModTime(path) } -func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct serializer.ContentType) { +func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct content.ContentType) { if file, err := r.realPath(key); err == nil { ct = ContentTypes[filepath.Ext(file)] // Retrieve the correct format based on the extension } diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go index 93304332..aeddd06c 100644 --- a/pkg/storage/rawstorage.go +++ b/pkg/storage/rawstorage.go @@ -9,8 +9,8 @@ import ( "strconv" "strings" + "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/util" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -36,7 +36,7 @@ type RawStorage interface { // If the resource does not exist, it returns ErrNotFound. 
Checksum(key ObjectKey) (string, error) // ContentType returns the content type of the contents of the resource indicated by key. - ContentType(key ObjectKey) serializer.ContentType + ContentType(key ObjectKey) content.ContentType // WatchDir returns the path for Watchers to watch changes in. WatchDir() string @@ -45,7 +45,7 @@ type RawStorage interface { GetKey(path string) (ObjectKey, error) } -func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct serializer.ContentType) RawStorage { +func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct content.ContentType) RawStorage { ext := extForContentType(ct) if ext == "" { panic("Invalid content type") @@ -65,7 +65,7 @@ func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct serializer.Cont type GenericRawStorage struct { dir string gv schema.GroupVersion - ct serializer.ContentType + ct content.ContentType ext string } @@ -175,7 +175,7 @@ func (r *GenericRawStorage) Checksum(key ObjectKey) (string, error) { return checksumFromModTime(r.keyPath(key)) } -func (r *GenericRawStorage) ContentType(_ ObjectKey) serializer.ContentType { +func (r *GenericRawStorage) ContentType(_ ObjectKey) content.ContentType { return r.ct } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 4d942324..9cc31d11 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -2,12 +2,14 @@ package storage import ( "bytes" + "context" "errors" "fmt" - "io" "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/filter" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" patchutil "github.com/weaveworks/libgitops/pkg/util/patch" @@ -149,7 +151,7 @@ func (s *GenericStorage) GetMeta(key ObjectKey) (runtime.PartialObject, error) { // TODO: Make sure we don't save a partial object func (s *GenericStorage) write(key ObjectKey, obj runtime.Object) error { // Set the content type based on the format given by the RawStorage, but default to JSON - contentType := serializer.ContentTypeJSON + contentType := content.ContentTypeJSON if ct := s.raw.ContentType(key); len(ct) != 0 { contentType = ct } @@ -160,13 +162,15 @@ func (s *GenericStorage) write(key ObjectKey, obj runtime.Object) error { obj.SetCreationTimestamp(metav1.Now()) } - var objBytes bytes.Buffer - err := s.serializer.Encoder().Encode(serializer.NewFrameWriter(contentType, &objBytes), obj) + var buf bytes.Buffer + // TODO: Multi-frame support + w := frame.ToSingleBuffer(contentType, &buf) + err := s.serializer.Encoder().Encode(w, obj) if err != nil { return err } - return s.raw.Write(key, objBytes.Bytes()) + return s.raw.Write(key, buf.Bytes()) } func (s *GenericStorage) Create(obj runtime.Object) error { @@ -348,7 +352,7 @@ func (s *GenericStorage) identify(obj runtime.Object) runtime.Identifyable { return nil } -func (s *GenericStorage) decode(key ObjectKey, content []byte) (runtime.Object, error) { +func (s *GenericStorage) decode(key ObjectKey, objBytes []byte) (runtime.Object, error) { gvk := key.GetGVK() // Decode the bytes to the internal version of the Object, if desired isInternal := gvk.Version == kruntime.APIVersionInternal @@ -358,7 +362,8 @@ func (s *GenericStorage) decode(key ObjectKey, content []byte) (runtime.Object, logrus.Infof("Decoding with content type %s", ct) obj, err := s.serializer.Decoder( serializer.WithConvertToHubDecode(isInternal), - ).Decode(serializer.NewFrameReader(ct, 
serializer.FromBytes(content))) + ).Decode(frame.NewSingleReader(ct, content.FromBytes(objBytes))) + // TODO: Multi-frame support if err != nil { return nil, err } @@ -374,9 +379,9 @@ func (s *GenericStorage) decode(key ObjectKey, content []byte) (runtime.Object, return metaObj, nil } -func (s *GenericStorage) decodeMeta(key ObjectKey, content []byte) (runtime.PartialObject, error) { +func (s *GenericStorage) decodeMeta(key ObjectKey, frame []byte) (runtime.PartialObject, error) { gvk := key.GetGVK() - partobjs, err := DecodePartialObjects(serializer.FromBytes(content), s.serializer.Scheme(), false, &gvk) + partobjs, err := DecodePartialObjects(content.FromBytes(frame), s.serializer.Scheme(), false, &gvk) if err != nil { return nil, err } @@ -412,10 +417,11 @@ func (s *GenericStorage) walkKind(kind KindKey, fn func(key ObjectKey, content [ // DecodePartialObjects reads any set of frames from the given ReadCloser, decodes the frames into // PartialObjects, validates that the decoded objects are known to the scheme, and optionally sets a default // group -func DecodePartialObjects(rc io.ReadCloser, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) { - fr := serializer.NewYAMLFrameReader(rc) +func DecodePartialObjects(r content.Reader, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) { + fr := frame.NewYAMLReader(r) - frames, err := serializer.ReadFrameList(fr) + ctx := context.TODO() + frames, err := frame.ListFromReader(ctx, fr) if err != nil { return nil, err } diff --git a/pkg/storage/transaction/git.go b/pkg/storage/transaction/git.go index efc57ab3..5bb2f26d 100644 --- a/pkg/storage/transaction/git.go +++ b/pkg/storage/transaction/git.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/gitdir" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" @@ -144,7 +145,7 @@ func computeMappings(dir string, s storage.Storage) (map[storage.ObjectKey]strin // can automatically subscribe to changes of objects between versions. 
m := map[storage.ObjectKey]string{} for _, file := range files { - partObjs, err := storage.DecodePartialObjects(serializer.FromFile(file), s.Serializer().Scheme(), false, nil) + partObjs, err := storage.DecodePartialObjects(content.FromFile(file), s.Serializer().Scheme(), false, nil) if err != nil { logrus.Errorf("couldn't decode %q into a partial object: %v", file, err) continue diff --git a/pkg/util/patch/patch.go b/pkg/util/patch/patch.go index 11c29ea8..37ed6228 100644 --- a/pkg/util/patch/patch.go +++ b/pkg/util/patch/patch.go @@ -5,6 +5,8 @@ import ( "fmt" "io/ioutil" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" "k8s.io/apimachinery/pkg/runtime/schema" @@ -31,7 +33,7 @@ func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) encoder := p.serializer.Encoder() old := new.DeepCopyObject().(runtime.Object) - if err = encoder.Encode(serializer.NewJSONFrameWriter(&oldBytes), old); err != nil { + if err = encoder.Encode(frame.NewJSONWriter(content.NewWriter(&oldBytes)), old); err != nil { return } @@ -39,7 +41,7 @@ func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) return } - if err = encoder.Encode(serializer.NewJSONFrameWriter(&newBytes), new); err != nil { + if err = encoder.Encode(frame.NewJSONWriter(content.NewWriter(&newBytes)), new); err != nil { return } @@ -89,13 +91,13 @@ func (p *patcher) ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVer // with the serializer so it conforms to a runtime.Object // TODO: Just use encoding/json.Indent here instead? func (p *patcher) serializerEncode(input []byte) ([]byte, error) { - obj, err := p.serializer.Decoder().Decode(serializer.NewJSONFrameReader(serializer.FromBytes(input))) + obj, err := p.serializer.Decoder().Decode(frame.NewJSONReader(content.FromBytes(input))) if err != nil { return nil, err } var result bytes.Buffer - if err := p.serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&result), obj); err != nil { + if err := p.serializer.Encoder().Encode(frame.NewJSONWriter(content.NewWriter(&result)), obj); err != nil { return nil, err } diff --git a/pkg/util/patch/patch_test.go b/pkg/util/patch/patch_test.go index 9a3cf542..2453d7e3 100644 --- a/pkg/util/patch/patch_test.go +++ b/pkg/util/patch/patch_test.go @@ -6,8 +6,9 @@ import ( api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/serializer" ) var ( @@ -53,7 +54,7 @@ func TestApplyPatch(t *testing.T) { if err != nil { t.Fatal(err) } - frameReader := serializer.NewJSONFrameReader(serializer.FromBytes(result)) + frameReader := frame.NewJSONReader(content.FromBytes(result)) if err := scheme.Serializer.Decoder().DecodeInto(frameReader, &api.Car{}); err != nil { t.Fatal(err) } From b665999dbf9d072a8375e0a0a9dfaee446506e9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:36:23 +0300 Subject: [PATCH 12/19] fix nit --- pkg/tracing/tracer_provider.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/tracing/tracer_provider.go b/pkg/tracing/tracer_provider.go index 92a8d16d..e79c4591 100644 --- a/pkg/tracing/tracer_provider.go +++ 
b/pkg/tracing/tracer_provider.go @@ -17,6 +17,12 @@ import ( "go.uber.org/multierr" ) +// TODO: Figure out how to unit-test this creation flow, as one cannot compare the +// returned tracerProviders due to internal fields. + +// ErrNoExportersProvided describes that no exporters where provided when building +var ErrNoExportersProvided = errors.New("no exporters provided") + // SDKTracerProvider represents a TracerProvider that is generated from the OpenTelemetry // SDK and hence can be force-flushed and shutdown (which in both cases flushes all async, // batched traces before stopping). @@ -146,8 +152,6 @@ func (b *builder) WithLogging(log bool) TracerProviderBuilder { return b } -var ErrNoExportersProvided = errors.New("no exporters provided") - func (b *builder) Build() (SDKTracerProvider, error) { // Combine and filter the errors from the exporter building if err := multierr.Combine(b.errs...); err != nil { @@ -156,7 +160,6 @@ func (b *builder) Build() (SDKTracerProvider, error) { if len(b.exporters) == 0 { return nil, ErrNoExportersProvided } - // TODO: Require at least one exporter // By default, set the service name to "libgitops". // This can be overridden through WithAttributes From b88534dbbda2e2a2452b085cc7c1c85b134979e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:36:38 +0300 Subject: [PATCH 13/19] Update golangci-lint --- .github/workflows/golangci-lint.yaml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 8b8c4d55..f2f54e6e 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -1,18 +1,21 @@ # https://github.com/marketplace/actions/run-golangci-lint name: golangci-lint - on: - pull_request: push: - branches: [master] - + tags: + - v* + branches: + - master + - main + pull_request: jobs: golangci: - name: Linter + name: lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: golangci-lint - uses: golangci/golangci-lint-action@v1 + uses: golangci/golangci-lint-action@v2 with: - version: v1.28 \ No newline at end of file + # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version + version: v1.29 \ No newline at end of file From d2faee2f2126af56a6b3e8b4d5616193c9e11d2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 13:38:22 +0300 Subject: [PATCH 14/19] Update dependencies, and mark where they come from --- Makefile | 2 +- go.mod | 46 +++-- go.sum | 519 +++++++++++++++++++++++++++++++++++++++++++------------ 3 files changed, 449 insertions(+), 118 deletions(-) diff --git a/Makefile b/Makefile index 4f3230b9..b0880974 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ UID_GID ?= $(shell id -u):$(shell id -g) -GO_VERSION ?= 1.14.4 +GO_VERSION ?= 1.16.5 GIT_VERSION := $(shell hack/ldflags.sh --version-only) PROJECT := github.com/weaveworks/libgitops BOUNDING_API_DIRS := ${PROJECT}/cmd/apis/sample diff --git a/go.mod b/go.mod index c03013fb..5d9d8013 100644 --- a/go.mod +++ b/go.mod @@ -1,16 +1,21 @@ module github.com/weaveworks/libgitops -go 1.14 +go 1.16 replace ( github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible - github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.3.0 + // Keep this version in sync with the Kubernetes version by looking at + // https://github.com/kubernetes/apimachinery/blob/v0.21.2/go.mod#L17 + 
github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1 ) require ( github.com/fluxcd/go-git-providers v0.0.2 github.com/fluxcd/toolkit v0.0.1-beta.2 github.com/go-git/go-git/v5 v5.1.0 + // Keep this in sync with Kubernetes by checking + // https://github.com/kubernetes-sigs/controller-runtime/blob/v0.9.2/go.mod + github.com/go-logr/logr v0.4.0 github.com/go-openapi/spec v0.19.8 github.com/google/go-github/v32 v32.1.0 github.com/labstack/echo v3.3.10+incompatible @@ -18,14 +23,35 @@ require ( github.com/mattn/go-isatty v0.0.12 // indirect github.com/mitchellh/go-homedir v1.1.0 github.com/rjeczalik/notify v0.9.2 - github.com/sirupsen/logrus v1.6.0 + github.com/sirupsen/logrus v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.6.1 - golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect - golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d - k8s.io/apimachinery v0.18.6 - k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 - sigs.k8s.io/controller-runtime v0.6.0 - sigs.k8s.io/kustomize/kyaml v0.1.11 + github.com/stretchr/testify v1.7.0 + // Keep all OTel imports the same version + go.opentelemetry.io/otel v1.0.0-RC1 + go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC1 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC1 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC1 + go.opentelemetry.io/otel/sdk v1.0.0-RC1 + go.opentelemetry.io/otel/trace v1.0.0-RC1 + go.uber.org/multierr v1.6.0 + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 + gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + // Use the latest available Kubernetes version. + k8s.io/apimachinery v0.21.2 + // Keep this in sync with the Kubernetes version by checking + // https://github.com/kubernetes/apimachinery/blob/v0.21.2/go.mod + k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e + // Keep this in sync with the Kubernetes version by checking + // https://github.com/kubernetes/kubernetes/blob/v1.21.2/go.mod#L527 + k8s.io/utils v0.0.0-20210527160623-6fdb442a123b + // Keep this in sync with Kubernetes by checking what controller-runtime + // version uses the right Kubernetes version, e.g. 
+ // https://github.com/kubernetes-sigs/controller-runtime/blob/v0.9.2/go.mod + sigs.k8s.io/controller-runtime v0.9.2 + // TODO: When a new kyaml version is released, use that (we need the sequence + // auto-indentation features) + sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 + // Keep this in sync with Kubernetes by checking + // https://github.com/kubernetes/apimachinery/blob/v0.21.2/go.mod#L40 sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index c1ecf376..d7116806 100644 --- a/go.sum +++ b/go.sum @@ -2,15 +2,43 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxo cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= 
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= @@ -21,15 +49,14 @@ github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHS github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg= -github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -38,12 +65,19 @@ github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4Rq github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -51,21 +85,26 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -79,6 +118,7 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -92,6 +132,7 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -110,22 +151,27 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod 
h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -135,9 +181,8 @@ github.com/fluxcd/kustomize-controller v0.0.1-beta.2/go.mod h1:mLeipvpQkyof6b5IH github.com/fluxcd/source-controller v0.0.1-beta.2/go.mod h1:tmscNdCxEt7+Xt2g1+bI38hMPw2leYMFAaCn4UlMGuw= github.com/fluxcd/toolkit v0.0.1-beta.2 h1:JG80AUIGd936QJ6Vs/xZweoKcE6j7Loua5Wn6Q/pVh8= github.com/fluxcd/toolkit v0.0.1-beta.2/go.mod h1:NqDXj2aeVMbVkrCHeP/r0um+edXXyeGlG/9pKZLqGdM= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -159,14 +204,23 @@ github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1 
github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA= github.com/go-git/go-git/v5 v5.1.0 h1:HxJn9g/E7eYvKW3Fm7Jt4ee8LXfPOm/H1cdDu8vEssk= github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -176,19 +230,15 @@ github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2 github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1 h1:wSt/4CYxs70xbATrGXhokKF1i0tZjENLOo1ioIO13zk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference 
v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= @@ -200,14 +250,11 @@ github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCs github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= @@ -216,11 +263,9 @@ github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pL github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -229,6 +274,7 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= 
github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= @@ -249,28 +295,39 @@ github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= @@ -292,35 +349,43 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA// github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github/v32 v32.0.0 h1:q74KVb22spUq0U5HqZ9VCYqQz8YRuOtL/39ZnfwO+NM= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.0.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz 
v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -331,61 +396,84 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod 
[go.sum diff snipped: machine-generated checksum entries for the dependency bumps in this series, notably k8s.io/{api,apimachinery,apiextensions-apiserver,client-go,code-generator,component-base} v0.18.x -> v0.21.2, the new go.opentelemetry.io/otel v1.0.0-RC1 tracing modules, google.golang.org/grpc v1.38.0, google.golang.org/protobuf v1.26.0, stretchr/testify v1.7.0, onsi/ginkgo v1.16.4 and gomega v1.13.0, sirupsen/logrus v1.7.0, gopkg.in/yaml.v2 v2.4.0, and refreshed golang.org/x/* modules.]
v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -903,23 +1201,30 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= -sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM= sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= +sigs.k8s.io/controller-runtime v0.9.2 h1:MnCAsopQno6+hI9SgJHKddzXpmv2wtouZz6931Eax+Q= +sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.4.1/go.mod h1:NqxqT+wbYHrD0P19Uu4dXiMsVwI1IwQs+MJHlLhmPqQ= -sigs.k8s.io/kustomize/kyaml v0.1.11 h1:/VvWxVIgH5gG1K4A7trgbyLgO3tRBiAWNhLFVU1HEmo= sigs.k8s.io/kustomize/kyaml v0.1.11/go.mod h1:72/rLkSi+L/pHM1oCjwrf3ClU+tH5kZQvvdLSqIHwWU= +sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 h1:Nkg3viu9IE/TSzvYt4GGy5FkhdPk3bptXuxW5TnU9uo= +sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From 9b8209381c3284b508894991bfbea9dc04b2512d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 15:23:34 +0300 Subject: [PATCH 15/19] Remove some old OpenAPI generation that is not needed at the moment. 
--- Makefile | 20 ++- api/openapi/openapi_generated.go | 277 ------------------------------- api/openapi/violations.txt | 0 go.mod | 4 - go.sum | 3 - 5 files changed, 11 insertions(+), 293 deletions(-) delete mode 100644 api/openapi/openapi_generated.go delete mode 100644 api/openapi/violations.txt diff --git a/Makefile b/Makefile index b0880974..ffa1e793 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ tidy-internal: /go/bin/goimports goimports -w ${SRC_PKGS} autogen: docker-autogen-internal -autogen-internal: /go/bin/deepcopy-gen /go/bin/defaulter-gen /go/bin/conversion-gen /go/bin/openapi-gen +autogen-internal: /go/bin/deepcopy-gen /go/bin/defaulter-gen /go/bin/conversion-gen # Let the boilerplate be empty touch /tmp/boilerplate @@ -63,12 +63,13 @@ autogen-internal: /go/bin/deepcopy-gen /go/bin/defaulter-gen /go/bin/conversion- --input-dirs ${API_DIRS} \ -O zz_generated.conversion \ -h /tmp/boilerplate - - /go/bin/openapi-gen \ - --input-dirs ${API_DIRS} \ - --output-package ${PROJECT}/api/openapi \ - --report-filename api/openapi/violations.txt \ - -h /tmp/boilerplate + +# Uncomment this if you'd like to enable OpenAPI generation for the types. +# /go/bin/openapi-gen \ +# --input-dirs ${API_DIRS} \ +# --output-package ${PROJECT}/api/openapi \ +# --report-filename api/openapi/violations.txt \ +# -h /tmp/boilerplate # These commands modify the environment, perform cleanup $(MAKE) tidy-internal @@ -76,8 +77,9 @@ autogen-internal: /go/bin/deepcopy-gen /go/bin/defaulter-gen /go/bin/conversion- /go/bin/deepcopy-gen /go/bin/defaulter-gen /go/bin/conversion-gen: /go/bin/%: go get k8s.io/code-generator/cmd/$* -/go/bin/openapi-gen: - go get k8s.io/kube-openapi/cmd/openapi-gen +# Uncomment this if you'd like to enable OpenAPI generation for the types. +#/go/bin/openapi-gen: +# go get k8s.io/kube-openapi/cmd/openapi-gen /go/bin/goimports: go get golang.org/x/tools/cmd/goimports diff --git a/api/openapi/openapi_generated.go b/api/openapi/openapi_generated.go deleted file mode 100644 index 2e315138..00000000 --- a/api/openapi/openapi_generated.go +++ /dev/null @@ -1,277 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by openapi-gen. DO NOT EDIT. - -// This file was autogenerated by openapi-gen. Do not edit it manually! 
- -package openapi - -import ( - spec "github.com/go-openapi/spec" - common "k8s.io/kube-openapi/pkg/common" -) - -func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { - return map[string]common.OpenAPIDefinition{ - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.Car": schema_sample_app_apis_sample_v1alpha1_Car(ref), - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.CarSpec": schema_sample_app_apis_sample_v1alpha1_CarSpec(ref), - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.CarStatus": schema_sample_app_apis_sample_v1alpha1_CarStatus(ref), - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.Motorcycle": schema_sample_app_apis_sample_v1alpha1_Motorcycle(ref), - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.MotorcycleSpec": schema_sample_app_apis_sample_v1alpha1_MotorcycleSpec(ref), - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.MotorcycleStatus": schema_sample_app_apis_sample_v1alpha1_MotorcycleStatus(ref), - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.VehicleStatus": schema_sample_app_apis_sample_v1alpha1_VehicleStatus(ref), - } -} - -func schema_sample_app_apis_sample_v1alpha1_Car(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Car represents a car", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "runtime.ObjectMeta is also embedded into the struct, and defines the human-readable name, and the machine-readable ID Name is available at the .metadata.name JSON path ID is available at the .metadata.uid JSON path (the Go type is k8s.io/apimachinery/pkg/types.UID, which is only a typed string)", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.CarSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.CarStatus"), - }, - }, - }, - Required: []string{"metadata", "spec", "status"}, - }, - }, - Dependencies: []string{ - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.CarSpec", "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.CarStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_sample_app_apis_sample_v1alpha1_CarSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "engine": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "yearModel": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "brand": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"engine", "yearModel", "brand"}, - }, - }, - } -} - -func schema_sample_app_apis_sample_v1alpha1_CarStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "speed": { - SchemaProps: spec.SchemaProps{ - Type: []string{"number"}, - Format: "double", - }, - }, - "acceleration": { - SchemaProps: spec.SchemaProps{ - Type: []string{"number"}, - Format: "double", - }, - }, - "distance": { - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", - }, - }, - "persons": { - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - Required: []string{"speed", "acceleration", "distance", "persons"}, - }, - }, - } -} - -func schema_sample_app_apis_sample_v1alpha1_Motorcycle(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Motorcycle represents a motorcycle", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "runtime.ObjectMeta is also embedded into the struct, and defines the human-readable name, and the machine-readable ID Name is available at the .metadata.name JSON path ID is available at the .metadata.uid JSON path (the Go type is k8s.io/apimachinery/pkg/types.UID, which is only a typed string)", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.MotorcycleSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.MotorcycleStatus"), - }, - }, - }, - Required: []string{"metadata", "spec", "status"}, - }, - }, - Dependencies: []string{ - "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.MotorcycleSpec", "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1.MotorcycleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_sample_app_apis_sample_v1alpha1_MotorcycleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "color": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "bodyType": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"color", "bodyType"}, - }, - }, - } -} - -func schema_sample_app_apis_sample_v1alpha1_MotorcycleStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "speed": { - SchemaProps: spec.SchemaProps{ - Type: []string{"number"}, - Format: "double", - }, - }, - "acceleration": { - SchemaProps: spec.SchemaProps{ - Type: []string{"number"}, - Format: "double", - }, - }, - "distance": { - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", - }, - }, - "currentWeight": { - SchemaProps: spec.SchemaProps{ - Type: []string{"number"}, - Format: "double", - }, - }, - }, - Required: []string{"speed", "acceleration", "distance", "currentWeight"}, - }, - }, - } -} - -func schema_sample_app_apis_sample_v1alpha1_VehicleStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "speed": { - SchemaProps: spec.SchemaProps{ - Type: []string{"number"}, - Format: "double", - }, - }, - "acceleration": { - SchemaProps: spec.SchemaProps{ - Type: []string{"number"}, - Format: "double", - }, - }, - "distance": { - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - Required: []string{"speed", "acceleration", "distance"}, - }, - }, - } -} diff --git a/api/openapi/violations.txt b/api/openapi/violations.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/go.mod b/go.mod index 5d9d8013..b306115c 100644 --- 
a/go.mod +++ b/go.mod @@ -16,7 +16,6 @@ require ( // Keep this in sync with Kubernetes by checking // https://github.com/kubernetes-sigs/controller-runtime/blob/v0.9.2/go.mod github.com/go-logr/logr v0.4.0 - github.com/go-openapi/spec v0.19.8 github.com/google/go-github/v32 v32.1.0 github.com/labstack/echo v3.3.10+incompatible github.com/labstack/gommon v0.3.0 // indirect @@ -39,9 +38,6 @@ require ( // Use the latest available Kubernetes version. k8s.io/apimachinery v0.21.2 // Keep this in sync with the Kubernetes version by checking - // https://github.com/kubernetes/apimachinery/blob/v0.21.2/go.mod - k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e - // Keep this in sync with the Kubernetes version by checking // https://github.com/kubernetes/kubernetes/blob/v1.21.2/go.mod#L527 k8s.io/utils v0.0.0-20210527160623-6fdb442a123b // Keep this in sync with Kubernetes by checking what controller-runtime diff --git a/go.sum b/go.sum index d7116806..7f2f92c2 100644 --- a/go.sum +++ b/go.sum @@ -158,7 +158,6 @@ github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= @@ -256,8 +255,6 @@ github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= From a643533ba005b42958de82bc65caaca942823477 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 30 Jul 2021 11:22:21 +0300 Subject: [PATCH 16/19] Rename pkg/content to pkg/stream --- cmd/sample-app/main.go | 5 +- cmd/sample-watch/main.go | 3 +- pkg/frame/constructors.go | 68 +++++++------- pkg/frame/interfaces.go | 36 +++---- pkg/frame/k8s_reader_streaming.go | 4 +- pkg/frame/options.go | 8 +- pkg/frame/reader.go | 12 +-- pkg/frame/reader_factory.go | 26 ++--- pkg/frame/reader_single.go | 12 +-- pkg/frame/reader_streaming.go | 30 +++--- pkg/frame/reader_test.go | 94 +++++++++---------- pkg/frame/sanitize/sanitize.go | 18 ++-- pkg/frame/sanitize/sanitize_test.go | 54 +++++------ pkg/frame/utils_test.go | 14 +-- pkg/frame/writer.go | 8 +- pkg/frame/writer_delegate.go | 16 ++-- pkg/frame/writer_factory.go | 20 ++-- pkg/frame/writer_test.go | 8 +- 
pkg/serializer/comments.go | 10 +- pkg/serializer/decode.go | 10 +- pkg/serializer/encode.go | 12 +-- pkg/serializer/serializer.go | 4 +- pkg/serializer/serializer_test.go | 50 +++++----- pkg/storage/format.go | 12 +-- pkg/storage/mappedrawstorage.go | 4 +- pkg/storage/rawstorage.go | 10 +- pkg/storage/storage.go | 12 +-- pkg/storage/transaction/git.go | 4 +- pkg/{content => stream}/constructors.go | 8 +- pkg/{content => stream}/errors.go | 2 +- pkg/{content => stream}/interfaces.go | 6 +- pkg/{content => stream}/metadata.go | 4 +- pkg/{content => stream}/metadata/metadata.go | 12 +-- .../metadata/metadata_test.go | 0 pkg/{content => stream}/reader.go | 4 +- pkg/{content => stream}/reader_test.go | 2 +- pkg/{content => stream}/recognizing.go | 4 +- .../recognizing_reader_test.go | 2 +- pkg/{content => stream}/recognizing_test.go | 4 +- pkg/{content => stream}/segment_reader.go | 2 +- pkg/{content => stream}/tracing.go | 2 +- pkg/{content => stream}/writer.go | 4 +- pkg/util/patch/patch.go | 10 +- pkg/util/patch/patch_test.go | 4 +- 44 files changed, 316 insertions(+), 318 deletions(-) rename pkg/{content => stream}/constructors.go (94%) rename pkg/{content => stream}/errors.go (98%) rename pkg/{content => stream}/interfaces.go (96%) rename pkg/{content => stream}/metadata.go (97%) rename pkg/{content => stream}/metadata/metadata.go (93%) rename pkg/{content => stream}/metadata/metadata_test.go (100%) rename pkg/{content => stream}/reader.go (98%) rename pkg/{content => stream}/reader_test.go (98%) rename pkg/{content => stream}/recognizing.go (98%) rename pkg/{content => stream}/recognizing_reader_test.go (98%) rename pkg/{content => stream}/recognizing_test.go (96%) rename pkg/{content => stream}/segment_reader.go (99%) rename pkg/{content => stream}/tracing.go (98%) rename pkg/{content => stream}/writer.go (97%) diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go index d0f9ae50..692aa074 100644 --- a/cmd/sample-app/main.go +++ b/cmd/sample-app/main.go @@ -12,7 +12,6 @@ import ( "github.com/weaveworks/libgitops/cmd/common" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/logs" "github.com/weaveworks/libgitops/pkg/runtime" @@ -42,7 +41,7 @@ func run() error { logs.Logger.SetLevel(logrus.InfoLevel) plainStorage := storage.NewGenericStorage( - storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, content.ContentTypeYAML), + storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, stream.ContentTypeYAML), scheme.Serializer, []runtime.IdentifierFactory{runtime.Metav1NameIdentifier}, ) @@ -61,7 +60,7 @@ func run() error { return err } var buf bytes.Buffer - if err := scheme.Serializer.Encoder().Encode(frame.NewJSONWriter(content.ToBuffer(&buf)), obj); err != nil { + if err := scheme.Serializer.Encoder().Encode(frame.NewJSONWriter(stream.ToBuffer(&buf)), obj); err != nil { return err } return c.JSONBlob(http.StatusOK, buf.Bytes()) diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go index 2abb1524..d8cc6b95 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -11,7 +11,6 @@ import ( "github.com/spf13/pflag" "github.com/weaveworks/libgitops/cmd/common" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" - "github.com/weaveworks/libgitops/pkg/content" 
"github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/logs" "github.com/weaveworks/libgitops/pkg/storage/watch" @@ -68,7 +67,7 @@ func run() error { return err } var buf bytes.Buffer - if err := scheme.Serializer.Encoder().Encode(frame.NewJSONWriter(content.ToBuffer(&buf)), obj); err != nil { + if err := scheme.Serializer.Encoder().Encode(frame.NewJSONWriter(stream.ToBuffer(&buf)), obj); err != nil { return err } return c.JSONBlob(http.StatusOK, buf.Bytes()) diff --git a/pkg/frame/constructors.go b/pkg/frame/constructors.go index 6e8ebe49..e9776b09 100644 --- a/pkg/frame/constructors.go +++ b/pkg/frame/constructors.go @@ -4,101 +4,101 @@ import ( "bytes" "context" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" ) // 2 generic Reader constructors -func NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader { +func NewSingleReader(ct stream.ContentType, r stream.Reader, opts ...SingleReaderOption) Reader { return internalFactoryVar.NewSingleReader(ct, r, opts...) } -func NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader { +func NewRecognizingReader(ctx context.Context, r stream.Reader, opts ...RecognizingReaderOption) Reader { return internalFactoryVar.NewRecognizingReader(ctx, r, opts...) } // 4 JSON-YAML Reader constructors using the default factory -func NewYAMLReader(r content.Reader, opts ...ReaderOption) Reader { - return internalFactoryVar.NewReader(content.ContentTypeYAML, r, opts...) +func NewYAMLReader(r stream.Reader, opts ...ReaderOption) Reader { + return internalFactoryVar.NewReader(stream.ContentTypeYAML, r, opts...) } -func NewJSONReader(r content.Reader, opts ...ReaderOption) Reader { - return internalFactoryVar.NewReader(content.ContentTypeJSON, r, opts...) +func NewJSONReader(r stream.Reader, opts ...ReaderOption) Reader { + return internalFactoryVar.NewReader(stream.ContentTypeJSON, r, opts...) } -func NewSingleYAMLReader(r content.Reader, opts ...SingleReaderOption) Reader { - return NewSingleReader(content.ContentTypeYAML, r, opts...) +func NewSingleYAMLReader(r stream.Reader, opts ...SingleReaderOption) Reader { + return NewSingleReader(stream.ContentTypeYAML, r, opts...) } -func NewSingleJSONReader(r content.Reader, opts ...SingleReaderOption) Reader { - return NewSingleReader(content.ContentTypeJSON, r, opts...) +func NewSingleJSONReader(r stream.Reader, opts ...SingleReaderOption) Reader { + return NewSingleReader(stream.ContentTypeJSON, r, opts...) } // 2 generic Writer constructors -func NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer { +func NewSingleWriter(ct stream.ContentType, w stream.Writer, opts ...SingleWriterOption) Writer { return internalFactoryVar.NewSingleWriter(ct, w, opts...) } -func NewRecognizingWriter(r content.Writer, opts ...RecognizingWriterOption) Writer { +func NewRecognizingWriter(r stream.Writer, opts ...RecognizingWriterOption) Writer { return internalFactoryVar.NewRecognizingWriter(r, opts...) } // 4 JSON-YAML Writer constructors using the default factory -func NewYAMLWriter(r content.Writer, opts ...WriterOption) Writer { - return internalFactoryVar.NewWriter(content.ContentTypeYAML, r, opts...) +func NewYAMLWriter(r stream.Writer, opts ...WriterOption) Writer { + return internalFactoryVar.NewWriter(stream.ContentTypeYAML, r, opts...) 
} -func NewJSONWriter(r content.Writer, opts ...WriterOption) Writer { - return internalFactoryVar.NewWriter(content.ContentTypeJSON, r, opts...) +func NewJSONWriter(r stream.Writer, opts ...WriterOption) Writer { + return internalFactoryVar.NewWriter(stream.ContentTypeJSON, r, opts...) } -func NewSingleYAMLWriter(r content.Writer, opts ...SingleWriterOption) Writer { - return internalFactoryVar.NewSingleWriter(content.ContentTypeYAML, r, opts...) +func NewSingleYAMLWriter(r stream.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(stream.ContentTypeYAML, r, opts...) } -func NewSingleJSONWriter(r content.Writer, opts ...SingleWriterOption) Writer { - return internalFactoryVar.NewSingleWriter(content.ContentTypeJSON, r, opts...) +func NewSingleJSONWriter(r stream.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(stream.ContentTypeJSON, r, opts...) } -// 1 single, 3 YAML and 1 recognizing content.Reader helper constructors +// 1 single, 3 YAML and 1 recognizing stream.Reader helper constructors -/*func FromSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleReaderOption) Reader { - return NewSingleReader(ct, content.FromBuffer(buf), opts...) +/*func FromSingleBuffer(ct stream.ContentType, buf *bytes.Buffer, opts ...SingleReaderOption) Reader { + return NewSingleReader(ct, stream.FromBuffer(buf), opts...) }*/ func FromYAMLBytes(yamlBytes []byte, opts ...ReaderOption) Reader { - return NewYAMLReader(content.FromBytes(yamlBytes), opts...) + return NewYAMLReader(stream.FromBytes(yamlBytes), opts...) } func FromYAMLString(yamlStr string, opts ...ReaderOption) Reader { - return NewYAMLReader(content.FromString(yamlStr), opts...) + return NewYAMLReader(stream.FromString(yamlStr), opts...) } func FromYAMLFile(filePath string, opts ...ReaderOption) Reader { - return NewYAMLReader(content.FromFile(filePath), opts...) + return NewYAMLReader(stream.FromFile(filePath), opts...) } func FromFile(ctx context.Context, filePath string, opts ...RecognizingReaderOption) Reader { - return NewRecognizingReader(ctx, content.FromFile(filePath), opts...) + return NewRecognizingReader(ctx, stream.FromFile(filePath), opts...) } -// 1 single, 2 YAML and 1 recognizing content.Writer helper constructors +// 1 single, 2 YAML and 1 recognizing stream.Writer helper constructors -func ToSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleWriterOption) Writer { - return NewSingleWriter(ct, content.ToBuffer(buf), opts...) +func ToSingleBuffer(ct stream.ContentType, buf *bytes.Buffer, opts ...SingleWriterOption) Writer { + return NewSingleWriter(ct, stream.ToBuffer(buf), opts...) } func ToYAMLBuffer(buf *bytes.Buffer, opts ...WriterOption) Writer { - return NewYAMLWriter(content.NewWriter(buf), opts...) + return NewYAMLWriter(stream.NewWriter(buf), opts...) } func ToYAMLFile(filePath string, opts ...WriterOption) Writer { - return NewYAMLWriter(content.ToFile(filePath), opts...) + return NewYAMLWriter(stream.ToFile(filePath), opts...) } func ToFile(filePath string, opts ...RecognizingWriterOption) Writer { - return NewRecognizingWriter(content.ToFile(filePath), opts...) + return NewRecognizingWriter(stream.ToFile(filePath), opts...) 
} diff --git a/pkg/frame/interfaces.go b/pkg/frame/interfaces.go index 363e166a..5dcc4156 100644 --- a/pkg/frame/interfaces.go +++ b/pkg/frame/interfaces.go @@ -3,7 +3,7 @@ package frame import ( "context" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" ) // TODO: Maybe implement/use context-aware (cancellable) io.Readers and io.Writers underneath? @@ -50,13 +50,13 @@ type Closer interface { // Returned errors (including io.EOF) MUST be checked for equality using // errors.Is(err, target), NOT using err == target. // -// TODO: Say that the ContentType is assumed constant per content.Reader +// TODO: Say that the ContentType is assumed constant per stream.Reader // // The Reader MAY respect cancellation signals on the context, depending on ReaderOptions. // The Reader MAY support reporting trace spans for how long certain operations take. type Reader interface { // The Reader is specific to possibly multiple framing types - content.ContentTyped + stream.ContentTyped // ReadFrame reads one frame from the underlying io.Read(Clos)er. At maximum, the frame is as // large as ReadWriterOptions.MaxFrameSize. See the documentation on the Reader interface for more @@ -64,7 +64,7 @@ type Reader interface { ReadFrame(ctx context.Context) ([]byte, error) // Exposes Metadata about the underlying io.Reader - content.MetadataContainer + stream.MetadataContainer // The Reader can be closed. If an underlying io.Reader is used, this is a no-op. If an // io.ReadCloser is used, this will close that io.ReadCloser. @@ -75,20 +75,20 @@ type Reader interface { type ReaderFactory interface { // ct is dominant; will error if r has a conflicting content type // ct must be one of the supported content types - NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader + NewReader(ct stream.ContentType, r stream.Reader, opts ...ReaderOption) Reader // opts.MaxFrameCount is dominant, will always be set to 1 // ct can be anything // ct is dominant; will error if r has a conflicting content type // Single options should not have MaxFrameCount at all, if possible - NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader + NewSingleReader(ct stream.ContentType, r stream.Reader, opts ...SingleReaderOption) Reader // will use the content type from r if set, otherwise infer from content metadata - // or peek bytes using the content.ContentTypeRecognizer + // or peek bytes using the stream.ContentTypeRecognizer // should add to options for a recognizer - NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader + NewRecognizingReader(ctx context.Context, r stream.Reader, opts ...RecognizingReaderOption) Reader // The SupportedContentTypes() method specifies what content types are supported by the // ReaderFactory - content.ContentTypeSupporter + stream.ContentTypeSupporter } // Writer is a framing type specific writer to an underlying io.Writer or io.WriteCloser. @@ -122,16 +122,16 @@ type ReaderFactory interface { // The Writer MAY respect cancellation signals on the context, depending on WriterOptions. // The Writer MAY support reporting trace spans for how long certain operations take. // -// TODO: Say that the ContentType is assumed constant per content.Writer +// TODO: Say that the ContentType is assumed constant per stream.Writer type Writer interface { // The Writer is specific to this framing type. 
- content.ContentTyped + stream.ContentTyped // WriteFrame writes one frame to the underlying io.Write(Close)r. // See the documentation on the Writer interface for more details. WriteFrame(ctx context.Context, frame []byte) error - // Exposes metadata from the underlying content.Writer - content.MetadataContainer + // Exposes metadata from the underlying stream.Writer + stream.MetadataContainer // The Writer can be closed. If an underlying io.Writer is used, this is a no-op. If an // io.WriteCloser is used, this will close that io.WriteCloser. @@ -142,20 +142,20 @@ type Writer interface { type WriterFactory interface { // ct is dominant; will error if r has a conflicting content type // ct must be one of the supported content types - NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer + NewWriter(ct stream.ContentType, w stream.Writer, opts ...WriterOption) Writer // opts.MaxFrameCount is dominant, will always be set to 1 // ct can be anything // ct is dominant; will error if r has a conflicting content type // Single options should not have MaxFrameCount at all, if possible - NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer + NewSingleWriter(ct stream.ContentType, w stream.Writer, opts ...SingleWriterOption) Writer // will use the content type from r if set, otherwise infer from content metadata - // using the content.ContentTypeRecognizer + // using the stream.ContentTypeRecognizer // should add to options for a recognizer - NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer + NewRecognizingWriter(w stream.Writer, opts ...RecognizingWriterOption) Writer // The SupportedContentTypes() method specifies what content types are supported by the // WriterFactory - content.ContentTypeSupporter + stream.ContentTypeSupporter } // Factory is the union of ReaderFactory and WriterFactory. 
diff --git a/pkg/frame/k8s_reader_streaming.go b/pkg/frame/k8s_reader_streaming.go index 9ff21cec..4a0b796b 100644 --- a/pkg/frame/k8s_reader_streaming.go +++ b/pkg/frame/k8s_reader_streaming.go @@ -30,13 +30,13 @@ import ( "fmt" "io" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util/limitedio" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" ) // Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L63-L67 -func newK8sStreamingReader(rc io.ReadCloser, maxFrameSize int64) content.ClosableRawSegmentReader { +func newK8sStreamingReader(rc io.ReadCloser, maxFrameSize int64) stream.ClosableRawSegmentReader { if maxFrameSize == 0 { maxFrameSize = limitedio.DefaultMaxReadSize.Int64() } diff --git a/pkg/frame/options.go b/pkg/frame/options.go index a3976578..55ccef10 100644 --- a/pkg/frame/options.go +++ b/pkg/frame/options.go @@ -1,8 +1,8 @@ package frame import ( - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util/limitedio" ) @@ -58,7 +58,7 @@ func defaultRecognizingReaderOptions() *recognizingReaderOptions { return &recognizingReaderOptions{ RecognizingOptions: RecognizingOptions{ Options: defaultReaderOptions().Options, - Recognizer: content.NewJSONYAMLContentTypeRecognizer(), + Recognizer: stream.NewJSONYAMLContentTypeRecognizer(), }, } } @@ -67,7 +67,7 @@ func defaultRecognizingWriterOptions() *recognizingWriterOptions { return &recognizingWriterOptions{ RecognizingOptions: RecognizingOptions{ Options: defaultWriterOptions().Options, - Recognizer: content.NewJSONYAMLContentTypeRecognizer(), + Recognizer: stream.NewJSONYAMLContentTypeRecognizer(), }, } } @@ -109,7 +109,7 @@ func (o Options) applyTo(target *Options) { type RecognizingOptions struct { Options - Recognizer content.ContentTypeRecognizer + Recognizer stream.ContentTypeRecognizer } func (o RecognizingOptions) applyToRecognizing(target *RecognizingOptions) { diff --git a/pkg/frame/reader.go b/pkg/frame/reader.go index b2800d32..c97635c5 100644 --- a/pkg/frame/reader.go +++ b/pkg/frame/reader.go @@ -4,8 +4,8 @@ import ( "context" "sync" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/tracing" "github.com/weaveworks/libgitops/pkg/util/limitedio" "go.opentelemetry.io/otel/trace" @@ -61,9 +61,9 @@ func (r *highlevelReader) ReadFrame(ctx context.Context) ([]byte, error) { } // Record how large the frame is, and its content for debugging - span.SetAttributes(content.SpanAttrByteContent(frame)...) + span.SetAttributes(stream.SpanAttrByteContent(frame)...) 
return nil - }).RegisterCustom(content.SpanRegisterReadError) + }).RegisterCustom(stream.SpanRegisterReadError) // SpanRegisterReadError registers io.EOF as an "event", and other errors as "unknown errors" in the trace if err != nil { return nil, err @@ -108,6 +108,6 @@ func (r *highlevelReader) readFrame(ctx context.Context) ([]byte, error) { return frame, nil } -func (r *highlevelReader) ContentType() content.ContentType { return r.read.ContentType() } -func (r *highlevelReader) Close(ctx context.Context) error { return closeWithTrace(ctx, r.read, r) } -func (r *highlevelReader) ContentMetadata() content.Metadata { return r.read.ContentMetadata() } +func (r *highlevelReader) ContentType() stream.ContentType { return r.read.ContentType() } +func (r *highlevelReader) Close(ctx context.Context) error { return closeWithTrace(ctx, r.read, r) } +func (r *highlevelReader) ContentMetadata() stream.Metadata { return r.read.ContentMetadata() } diff --git a/pkg/frame/reader_factory.go b/pkg/frame/reader_factory.go index adbea975..c6411391 100644 --- a/pkg/frame/reader_factory.go +++ b/pkg/frame/reader_factory.go @@ -3,7 +3,7 @@ package frame import ( "context" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util/limitedio" ) @@ -15,22 +15,22 @@ var internalFactoryVar = DefaultFactory() type defaultFactory struct{} -func (defaultFactory) NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader { +func (defaultFactory) NewReader(ct stream.ContentType, r stream.Reader, opts ...ReaderOption) Reader { o := defaultReaderOptions().applyOptions(opts) var lowlevel Reader switch ct { - case content.ContentTypeYAML: + case stream.ContentTypeYAML: lowlevel = newYAMLReader(r, o) - case content.ContentTypeJSON: + case stream.ContentTypeJSON: lowlevel = newJSONReader(r, o) default: - return newErrReader(content.ErrUnsupportedContentType(ct), "", r.ContentMetadata()) + return newErrReader(stream.ErrUnsupportedContentType(ct), "", r.ContentMetadata()) } return newHighlevelReader(lowlevel, o) } -func (defaultFactory) NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader { +func (defaultFactory) NewSingleReader(ct stream.ContentType, r stream.Reader, opts ...SingleReaderOption) Reader { o := defaultSingleReaderOptions().applyOptions(opts) return newHighlevelReader(newSingleReader(r, ct, o), &readerOptions{ @@ -40,11 +40,11 @@ func (defaultFactory) NewSingleReader(ct content.ContentType, r content.Reader, }) } -func (f defaultFactory) NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader { +func (f defaultFactory) NewRecognizingReader(ctx context.Context, r stream.Reader, opts ...RecognizingReaderOption) Reader { o := defaultRecognizingReaderOptions().applyOptions(opts) // Recognize the content type using the given recognizer - r, ct, err := content.NewRecognizingReader(ctx, r, o.Recognizer) + r, ct, err := stream.NewRecognizingReader(ctx, r, o.Recognizer) if err != nil { return newErrReader(err, "", r.ContentMetadata()) } @@ -52,11 +52,11 @@ func (f defaultFactory) NewRecognizingReader(ctx context.Context, r content.Read return f.NewReader(ct, r, o) } -func (defaultFactory) SupportedContentTypes() content.ContentTypes { - return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON} +func (defaultFactory) SupportedContentTypes() stream.ContentTypes { + return []stream.ContentType{stream.ContentTypeYAML, 
stream.ContentTypeJSON} } -func newErrReader(err error, ct content.ContentType, meta content.Metadata) Reader { +func newErrReader(err error, ct stream.ContentType, meta stream.Metadata) Reader { return &errReader{ ct, meta.ToContainer(), @@ -67,8 +67,8 @@ func newErrReader(err error, ct content.ContentType, meta content.Metadata) Read // errReader always returns an error type errReader struct { - content.ContentTyped - content.MetadataContainer + stream.ContentTyped + stream.MetadataContainer Closer err error } diff --git a/pkg/frame/reader_single.go b/pkg/frame/reader_single.go index 27470721..bcf74247 100644 --- a/pkg/frame/reader_single.go +++ b/pkg/frame/reader_single.go @@ -4,12 +4,12 @@ import ( "context" "io" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" ) -func newSingleReader(r content.Reader, ct content.ContentType, o *singleReaderOptions) Reader { +func newSingleReader(r stream.Reader, ct stream.ContentType, o *singleReaderOptions) Reader { // Make sure not more than this set of bytes can be read - r, _ = content.WrapLimited(r, o.MaxFrameSize) + r, _ = stream.WrapLimited(r, o.MaxFrameSize) return &singleReader{ // TODO: Apply options? MetadataContainer: r.ContentMetadata().Clone().ToContainer(), @@ -22,9 +22,9 @@ func newSingleReader(r content.Reader, ct content.ContentType, o *singleReaderOp // It MUST be wrapped in a higher-level composite Reader like the highlevelReader to satisfy the // Reader interface correctly. type singleReader struct { - content.MetadataContainer - content.ContentTyped - r content.Reader + stream.MetadataContainer + stream.ContentTyped + r stream.Reader hasBeenRead bool } diff --git a/pkg/frame/reader_streaming.go b/pkg/frame/reader_streaming.go index 86efddbd..68f092cb 100644 --- a/pkg/frame/reader_streaming.go +++ b/pkg/frame/reader_streaming.go @@ -5,13 +5,13 @@ import ( "errors" "io" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util/limitedio" "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" ) -func newYAMLReader(r content.Reader, o *readerOptions) Reader { +func newYAMLReader(r stream.Reader, o *readerOptions) Reader { // json.YAMLFramer.NewFrameReader takes care of the actual YAML framing logic maxFrameSizeInt, err := o.MaxFrameSize.Int() if err != nil { @@ -22,22 +22,22 @@ func newYAMLReader(r content.Reader, o *readerOptions) Reader { }) // Mark the content type as YAML - r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeYAML)) + r.ContentMetadata().Apply(stream.WithContentType(stream.ContentTypeYAML)) - return newStreamingReader(content.ContentTypeYAML, r, o.MaxFrameSize) + return newStreamingReader(stream.ContentTypeYAML, r, o.MaxFrameSize) } // newJSONReader creates a "low-level" JSON Reader from the given io.ReadCloser. 
-func newJSONReader(r content.Reader, o *readerOptions) Reader { +func newJSONReader(r stream.Reader, o *readerOptions) Reader { // json.Framer.NewFrameReader takes care of the actual JSON framing logic r = r.Wrap(func(underlying io.ReadCloser) io.Reader { return json.Framer.NewFrameReader(underlying) }) // Mark the content type as JSON - r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeJSON)) + r.ContentMetadata().Apply(stream.WithContentType(stream.ContentTypeJSON)) - return newStreamingReader(content.ContentTypeJSON, r, o.MaxFrameSize) + return newStreamingReader(stream.ContentTypeJSON, r, o.MaxFrameSize) } // newStreamingReader makes a generic Reader that reads from an io.ReadCloser returned @@ -49,11 +49,11 @@ func newJSONReader(r content.Reader, o *readerOptions) Reader { // // Note: This Reader is a so-called "low-level" one. It doesn't do tracing, mutex locking, or // proper closing logic. It must be wrapped by a composite, high-level Reader like highlevelReader. -func newStreamingReader(ct content.ContentType, r content.Reader, maxFrameSize limitedio.Limit) Reader { - // Limit the amount of bytes read from the content.Reader - r, resetCounter := content.WrapLimited(r, maxFrameSize) +func newStreamingReader(ct stream.ContentType, r stream.Reader, maxFrameSize limitedio.Limit) Reader { + // Limit the amount of bytes read from the stream.Reader + r, resetCounter := stream.WrapLimited(r, maxFrameSize) // Wrap - cr := r.WrapSegment(func(rc io.ReadCloser) content.RawSegmentReader { + cr := r.WrapSegment(func(rc io.ReadCloser) stream.RawSegmentReader { return newK8sStreamingReader(rc, maxFrameSize.Int64()) }) @@ -72,10 +72,10 @@ func newStreamingReader(ct content.ContentType, r content.Reader, maxFrameSize l // given k8sStreamingReader. When reader_streaming_k8s.go is upstreamed, we can replace the // temporary k8sStreamingReader interface with a "proper" Kubernetes one. type streamingReader struct { - content.MetadataContainer - content.ContentTyped - resetCounter content.ResetCounterFunc - cr content.SegmentReader + stream.MetadataContainer + stream.ContentTyped + resetCounter stream.ResetCounterFunc + cr stream.SegmentReader maxFrameSize limitedio.Limit } diff --git a/pkg/frame/reader_test.go b/pkg/frame/reader_test.go index 3fd09e22..29276a40 100644 --- a/pkg/frame/reader_test.go +++ b/pkg/frame/reader_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/tracing" "github.com/weaveworks/libgitops/pkg/util/compositeio" "github.com/weaveworks/libgitops/pkg/util/limitedio" @@ -85,7 +85,7 @@ type testcase struct { } type testdata struct { - ct content.ContentType + ct stream.ContentType single, recognizing bool // frames contain the individual frames of rawData, which in turn is the content of the underlying // source/stream. 
if len(writeResults) == 0, there will be no checking that writing all frames @@ -129,7 +129,7 @@ const ( ` messyJSON = messyJSONP1 + messyJSONP2 - otherCT = content.ContentType("other") + otherCT = stream.ContentType("other") otherFrame = "('other'; 9)\n('bar'; true)" otherFrameLen = int64(len(otherFrame)) ) @@ -148,8 +148,8 @@ var defaultTestCases = []testcase{ { name: "simple roundtrip", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, - {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, + {ct: stream.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, + {ct: stream.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, }, writeResults: []error{nil, nil, nil, nil}, readResults: []error{nil, io.EOF, io.EOF, io.EOF}, @@ -158,8 +158,8 @@ var defaultTestCases = []testcase{ { name: "two-frame roundtrip with closed writer", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, - {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, + {ct: stream.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: stream.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, }, writeResults: []error{nil, nil, nil, nil}, readResults: []error{nil, nil, io.EOF, io.EOF}, @@ -168,14 +168,14 @@ var defaultTestCases = []testcase{ { name: "YAML Read: a newline will be added", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: noNewlineYAML, frames: []string{testYAML}}, + {ct: stream.ContentTypeYAML, rawData: noNewlineYAML, frames: []string{testYAML}}, }, readResults: []error{nil, io.EOF}, }, { name: "YAML Write: a newline will be added", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{noNewlineYAML}, rawData: yamlSep + testYAML}, + {ct: stream.ContentTypeYAML, frames: []string{noNewlineYAML}, rawData: yamlSep + testYAML}, }, writeResults: []error{nil}, }, @@ -183,26 +183,26 @@ var defaultTestCases = []testcase{ { name: "Read: io.EOF when there are no non-empty frames", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: "---"}, - {ct: content.ContentTypeYAML, rawData: "---\n"}, - {ct: content.ContentTypeJSON, rawData: ""}, - {ct: content.ContentTypeJSON, rawData: " \n "}, + {ct: stream.ContentTypeYAML, rawData: "---"}, + {ct: stream.ContentTypeYAML, rawData: "---\n"}, + {ct: stream.ContentTypeJSON, rawData: ""}, + {ct: stream.ContentTypeJSON, rawData: " \n "}, }, readResults: []error{io.EOF}, }, { name: "Write: Empty sanitized frames aren't written", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{"---", "---\n", " \n--- \n---"}}, - {ct: content.ContentTypeJSON, frames: []string{"", " \n ", " "}}, + {ct: stream.ContentTypeYAML, frames: []string{"---", "---\n", " \n--- \n---"}}, + {ct: stream.ContentTypeJSON, frames: []string{"", " \n ", " "}}, }, writeResults: []error{nil, nil, nil}, }, { name: "Write: can write empty frames forever without errors", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, - {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, + {ct: stream.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + 
testYAML}, + {ct: stream.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, }, writeResults: []error{nil, nil, nil, nil, nil}, readResults: []error{nil, nil, io.EOF}, @@ -211,30 +211,30 @@ var defaultTestCases = []testcase{ { name: "YAML Read: a leading \\n--- will be ignored", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + noNewlineYAML, frames: []string{testYAML}}, + {ct: stream.ContentTypeYAML, rawData: "\n" + yamlSep + noNewlineYAML, frames: []string{testYAML}}, }, readResults: []error{nil, io.EOF}, }, { name: "YAML Read: a leading --- will be ignored", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: yamlSep + noNewlineYAML, frames: []string{testYAML}}, + {ct: stream.ContentTypeYAML, rawData: yamlSep + noNewlineYAML, frames: []string{testYAML}}, }, readResults: []error{nil, io.EOF}, }, { name: "Read: sanitize messy content", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: messyYAML, frames: []string{testYAML, testYAML}}, - {ct: content.ContentTypeJSON, rawData: messyJSON, frames: []string{testJSON, testJSON}}, + {ct: stream.ContentTypeYAML, rawData: messyYAML, frames: []string{testYAML, testYAML}}, + {ct: stream.ContentTypeJSON, rawData: messyJSON, frames: []string{testJSON, testJSON}}, }, readResults: []error{nil, nil, io.EOF}, }, { name: "Write: sanitize messy content", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{messyYAMLP1, messyYAMLP2}, rawData: yamlSep + testYAML + yamlSep + testYAML}, - {ct: content.ContentTypeJSON, frames: []string{messyJSONP1, messyJSONP2}, rawData: testJSON + testJSON}, + {ct: stream.ContentTypeYAML, frames: []string{messyYAMLP1, messyYAMLP2}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: stream.ContentTypeJSON, frames: []string{messyJSONP1, messyJSONP2}, rawData: testJSON + testJSON}, }, writeResults: []error{nil, nil}, }, @@ -242,8 +242,8 @@ var defaultTestCases = []testcase{ { name: "Read: the frame size is exactly within bounds, also enforce counter reset", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, - {ct: content.ContentTypeJSON, rawData: testJSON + testJSON, frames: []string{testJSON, testJSON}}, + {ct: stream.ContentTypeYAML, rawData: yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, + {ct: stream.ContentTypeJSON, rawData: testJSON + testJSON, frames: []string{testJSON, testJSON}}, }, singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, readResults: []error{nil, nil, io.EOF}, @@ -251,7 +251,7 @@ var defaultTestCases = []testcase{ { name: "YAML Read: there is a newline before the initial ---, should sanitize", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, + {ct: stream.ContentTypeYAML, rawData: "\n" + yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, }, singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, readResults: []error{nil, nil, io.EOF}, @@ -259,8 +259,8 @@ var defaultTestCases = []testcase{ { name: "Read: the frame is out of bounds, on the same line", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: testYAML}, - {ct: content.ContentTypeJSON, rawData: testJSON}, + {ct: stream.ContentTypeYAML, rawData: testYAML}, + {ct: stream.ContentTypeJSON, 
rawData: testJSON}, }, singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen - 2)}}, readResults: []error{&limitedio.ReadSizeOverflowError{}}, @@ -268,7 +268,7 @@ var defaultTestCases = []testcase{ { name: "YAML Read: the frame is out of bounds, but continues on the next line", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: testYAML + testYAML}, + {ct: stream.ContentTypeYAML, rawData: testYAML + testYAML}, }, singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, readResults: []error{&limitedio.ReadSizeOverflowError{}}, @@ -276,8 +276,8 @@ var defaultTestCases = []testcase{ { name: "Read: first frame ok, then always frame overflow", testdata: []testdata{ - {ct: content.ContentTypeYAML, rawData: testYAML + yamlSep + testYAML + testYAML, frames: []string{testYAML}}, - {ct: content.ContentTypeJSON, rawData: testJSON + testJSON2, frames: []string{testJSON}}, + {ct: stream.ContentTypeYAML, rawData: testYAML + yamlSep + testYAML + testYAML, frames: []string{testYAML}}, + {ct: stream.ContentTypeJSON, rawData: testJSON + testJSON2, frames: []string{testJSON}}, }, singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, readResults: []error{nil, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}}, @@ -285,8 +285,8 @@ var defaultTestCases = []testcase{ { name: "Write: the second frame is too large, ignore that, but allow writing smaller frames later", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML + testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, - {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2, testJSON}, rawData: testJSON + testJSON}, + {ct: stream.ContentTypeYAML, frames: []string{testYAML, testYAML + testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: stream.ContentTypeJSON, frames: []string{testJSON, testJSON2, testJSON}, rawData: testJSON + testJSON}, }, singleWriteOpts: []SingleWriterOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, writeResults: []error{nil, &limitedio.ReadSizeOverflowError{}, nil}, @@ -295,8 +295,8 @@ var defaultTestCases = []testcase{ { name: "first frame ok, then Read => EOF and Write => nil consistently", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, - {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, + {ct: stream.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, + {ct: stream.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, }, readResults: []error{nil, io.EOF, io.EOF, io.EOF, io.EOF}, writeResults: []error{nil, nil, nil, nil, nil}, @@ -305,8 +305,8 @@ var defaultTestCases = []testcase{ { name: "Write: Don't allow writing more than a maximum amount of frames", testdata: []testdata{ - {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, - {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON, testJSON}, rawData: testJSON + testJSON}, + {ct: stream.ContentTypeYAML, frames: []string{testYAML, testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: stream.ContentTypeJSON, frames: []string{testJSON, testJSON, testJSON}, rawData: testJSON + testJSON}, }, writeResults: []error{nil, nil, &FrameCountOverflowError{}, 
&FrameCountOverflowError{}}, writeOpts: []WriterOption{&Options{MaxFrameCount: 2}}, @@ -314,10 +314,10 @@ var defaultTestCases = []testcase{ { name: "Read: Don't allow reading more than a maximum amount of successful frames", testdata: []testdata{ - {ct: content.ContentTypeYAML, + {ct: stream.ContentTypeYAML, rawData: testYAML + yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, - {ct: content.ContentTypeJSON, + {ct: stream.ContentTypeJSON, rawData: testJSON + testJSON + testJSON, frames: []string{testJSON, testJSON}}, }, @@ -327,7 +327,7 @@ var defaultTestCases = []testcase{ { name: "Read: Don't allow reading more than a maximum amount of successful frames, and 10x in total", testdata: []testdata{ - {ct: content.ContentTypeYAML, + {ct: stream.ContentTypeYAML, rawData: strings.Repeat("\n"+yamlSep, 10) + testYAML}, }, readResults: []error{&FrameCountOverflowError{}, &FrameCountOverflowError{}}, @@ -336,7 +336,7 @@ var defaultTestCases = []testcase{ { name: "Read: Allow reading up to the maximum amount of 10x the successful frames count", testdata: []testdata{ - {ct: content.ContentTypeYAML, + {ct: stream.ContentTypeYAML, rawData: strings.Repeat("\n"+yamlSep, 9) + testYAML + yamlSep + yamlSep, frames: []string{testYAML}}, }, readResults: []error{nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, @@ -345,10 +345,10 @@ var defaultTestCases = []testcase{ { name: "Read: Allow reading exactly that amount of successful frames, if then io.EOF", testdata: []testdata{ - {ct: content.ContentTypeYAML, + {ct: stream.ContentTypeYAML, rawData: testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, - {ct: content.ContentTypeJSON, + {ct: stream.ContentTypeJSON, rawData: testJSON + testJSON, frames: []string{testJSON, testJSON}}, }, @@ -446,8 +446,8 @@ func (h *FactoryTester) testRoundtripCaseContentType(t *testing.T, ctx context.C readCloseCounter := &recordingCloser{} writeCloseCounter := &recordingCloser{} - cw := content.NewWriter(compositeio.WriteCloser(&buf, writeCloseCounter)) - cr := content.NewReader(compositeio.ReadCloser(&buf, readCloseCounter)) + cw := stream.NewWriter(compositeio.WriteCloser(&buf, writeCloseCounter)) + cr := stream.NewReader(compositeio.ReadCloser(&buf, readCloseCounter)) var w Writer if d.single && d.recognizing { panic("cannot be both single and recognizing") @@ -458,7 +458,7 @@ func (h *FactoryTester) testRoundtripCaseContentType(t *testing.T, ctx context.C } else { w = h.factory.NewWriter(d.ct, cw, c.writeOpts...) } - assert.Equalf(t, w.ContentType(), d.ct, "Writer.content.ContentType") + assert.Equalf(t, w.ContentType(), d.ct, "Writer.ContentType") var r Reader if d.single && d.recognizing { @@ -470,7 +470,7 @@ func (h *FactoryTester) testRoundtripCaseContentType(t *testing.T, ctx context.C } else { r = h.factory.NewReader(d.ct, cr, c.readOpts...) 
} - assert.Equalf(t, r.ContentType(), d.ct, "Reader.content.ContentType") + assert.Equalf(t, r.ContentType(), d.ct, "Reader.ContentType") // Write frames using the writer for i, expected := range c.writeResults { diff --git a/pkg/frame/sanitize/sanitize.go b/pkg/frame/sanitize/sanitize.go index 90547db9..398e1baf 100644 --- a/pkg/frame/sanitize/sanitize.go +++ b/pkg/frame/sanitize/sanitize.go @@ -7,8 +7,8 @@ import ( "errors" "strings" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments" + "github.com/weaveworks/libgitops/pkg/stream" "k8s.io/utils/pointer" "sigs.k8s.io/kustomize/kyaml/kio" "sigs.k8s.io/kustomize/kyaml/yaml" @@ -24,9 +24,9 @@ type Sanitizer interface { // other framing types than the default ones, which might not be desired. // // The returned frame should have len == 0 if it's considered empty. - Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error) + Sanitize(ctx context.Context, ct stream.ContentType, frame []byte) ([]byte, error) - content.ContentTypeSupporter + stream.ContentTypeSupporter } // defaultSanitizer implements frame sanitation for JSON and YAML. @@ -115,11 +115,11 @@ type defaultSanitizer struct { opts *jsonYAMLOptions } -func (s *defaultSanitizer) Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error) { +func (s *defaultSanitizer) Sanitize(ctx context.Context, ct stream.ContentType, frame []byte) ([]byte, error) { switch ct { - case content.ContentTypeYAML: + case stream.ContentTypeYAML: return s.handleYAML(ctx, frame) - case content.ContentTypeJSON: + case stream.ContentTypeJSON: return s.handleJSON(frame) default: // Just passthrough @@ -127,8 +127,8 @@ func (s *defaultSanitizer) Sanitize(ctx context.Context, ct content.ContentType, } } -func (defaultSanitizer) SupportedContentTypes() content.ContentTypes { - return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON} +func (defaultSanitizer) SupportedContentTypes() stream.ContentTypes { + return []stream.ContentType{stream.ContentTypeYAML, stream.ContentTypeJSON} } var ErrTooManyFrames = errors.New("too many frames") @@ -205,7 +205,7 @@ func (s *defaultSanitizer) handleJSON(frame []byte) ([]byte, error) { return append(bytes.TrimSpace(buf.Bytes()), '\n'), nil } -func IfSupported(ctx context.Context, s Sanitizer, ct content.ContentType, frame []byte) ([]byte, error) { +func IfSupported(ctx context.Context, s Sanitizer, ct stream.ContentType, frame []byte) ([]byte, error) { // If the content type isn't supported, nothing to do if s == nil || !s.SupportedContentTypes().Has(ct) { return frame, nil diff --git a/pkg/frame/sanitize/sanitize_test.go b/pkg/frame/sanitize/sanitize_test.go index cb8682a3..30b4ce15 100644 --- a/pkg/frame/sanitize/sanitize_test.go +++ b/pkg/frame/sanitize/sanitize_test.go @@ -6,14 +6,14 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" ) func Test_defaultSanitizer_Sanitize(t *testing.T) { tests := []struct { name string opts []JSONYAMLOption - ct content.ContentType + ct stream.ContentType prior string frame string want string @@ -22,13 +22,13 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }{ { name: "passthrough whatever", - ct: content.ContentType("unknown"), + ct: stream.ContentType("unknown"), frame: "{randomdata:", want: "{randomdata:", }, { name: "default compact", - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: `{ 
"foo": { "bar": "baz" @@ -40,7 +40,7 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }, { name: "with two spaces", - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: ` { "foo" : "bar" } `, opts: []JSONYAMLOption{WithSpacesIndent(2)}, @@ -51,7 +51,7 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }, { name: "with four spaces", - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: ` { "foo" : {"bar": "baz"} } `, opts: []JSONYAMLOption{WithSpacesIndent(4)}, @@ -64,7 +64,7 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }, { name: "with tab indent", - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: ` { "foo" : {"bar": "baz"} } `, opts: []JSONYAMLOption{WithTabsIndent(1)}, @@ -77,7 +77,7 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }, { name: "with malformed", - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: `{"foo":"`, opts: []JSONYAMLOption{WithCompactIndent()}, checkErr: func(err error) bool { @@ -87,7 +87,7 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }, { name: "only whitespace", - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: ` `, @@ -95,13 +95,13 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }, { name: "no json", - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: "", want: "", }, { name: "weird empty formatting", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, frame: ` --- @@ -111,13 +111,13 @@ func Test_defaultSanitizer_Sanitize(t *testing.T) { }, { name: "no yaml", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, frame: "", want: "", }, { name: "too many frames", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, frame: `aa: true --- bb: false @@ -126,7 +126,7 @@ bb: false }, { name: "make sure lists are not expanded", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, frame: `--- kind: List apiVersion: "v1" @@ -143,7 +143,7 @@ items: }, { name: "yaml format; don't be confused by the bar commend", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, frame: `--- kind: List @@ -164,7 +164,7 @@ items: }, { name: "detect indentation; don't be confused by the bar commend", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, frame: `--- kind: List @@ -185,7 +185,7 @@ items: }, { name: "force compact", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, opts: []JSONYAMLOption{WithCompactSeqIndent()}, frame: `--- @@ -207,7 +207,7 @@ items: }, { name: "force wide", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, opts: []JSONYAMLOption{WithWideSeqIndent()}, frame: `--- @@ -229,7 +229,7 @@ items: }, { name: "invalid indentation", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, frame: `--- kind: "foo" @@ -240,7 +240,7 @@ kind: "foo" }, { name: "infer seq style from prior; default is compact", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, opts: []JSONYAMLOption{}, prior: `# root # no lists here to look at @@ -269,7 +269,7 @@ items: }, { name: "copy comments; infer seq style from prior", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, opts: []JSONYAMLOption{}, prior: `# root # hello @@ -317,7 +317,7 @@ items: }, { name: "don't copy comments; infer from prior", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, opts: []JSONYAMLOption{WithNoCommentsCopy()}, prior: `# root # hello @@ -357,7 +357,7 @@ items: # new }, { name: "invalid prior", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, prior: `# root # 
hello @@ -389,7 +389,7 @@ items: # new }, { name: "invalid copy comments; change from scalar to mapping node", - ct: content.ContentTypeYAML, + ct: stream.ContentTypeYAML, prior: `# root foo: "bar" # baz`, frame: ` @@ -425,7 +425,7 @@ func TestIfSupported(t *testing.T) { tests := []struct { name string s Sanitizer - ct content.ContentType + ct stream.ContentType frame string want string wantErr bool @@ -438,14 +438,14 @@ func TestIfSupported(t *testing.T) { { name: "unknown content type", s: NewJSONYAML(), - ct: content.ContentType("unknown"), + ct: stream.ContentType("unknown"), frame: "foo", want: "foo", }, { name: "sanitize", s: NewJSONYAML(WithCompactIndent()), - ct: content.ContentTypeJSON, + ct: stream.ContentTypeJSON, frame: ` { "foo" : true } `, want: `{"foo":true} `, diff --git a/pkg/frame/utils_test.go b/pkg/frame/utils_test.go index c0c850f5..dc5b243c 100644 --- a/pkg/frame/utils_test.go +++ b/pkg/frame/utils_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/tracing" "github.com/weaveworks/libgitops/pkg/util/compositeio" "github.com/weaveworks/libgitops/pkg/util/limitedio" @@ -33,8 +33,8 @@ func TestFromConstructors(t *testing.T) { got, err := FromYAMLFile(yamlPath).ReadFrame(ctx) assert.Nil(t, err) assert.Equal(t, str, string(got)) - // content.FromFile -- already closed - f := content.FromFile(yamlPath) + // stream.FromFile -- already closed + f := stream.FromFile(yamlPath) (f.(rawCloserExposer)).RawCloser().Close() // deliberately close the file before giving it to the reader got, err = NewYAMLReader(f).ReadFrame(ctx) assert.ErrorIs(t, err, fs.ErrClosed) @@ -57,7 +57,7 @@ func TestFromConstructors(t *testing.T) { func TestToIoWriteCloser(t *testing.T) { var buf bytes.Buffer closeRec := &recordingCloser{} - cw := content.NewWriter(compositeio.WriteCloser(&buf, closeRec)) + cw := stream.NewWriter(compositeio.WriteCloser(&buf, closeRec)) w := NewYAMLWriter(cw, SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}) ctx := tracing.Context(true) iow := ToIoWriteCloser(ctx, w) @@ -91,7 +91,7 @@ func TestListFromReader(t *testing.T) { assert.Nil(t, err) // Non-happy case - r := NewJSONReader(content.FromString(testJSON2), SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen - 1)}) + r := NewJSONReader(stream.FromString(testJSON2), SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen - 1)}) fr, err = ListFromReader(ctx, r) assert.Len(t, fr, 0) assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{}) @@ -101,8 +101,8 @@ func TestListFromReader(t *testing.T) { func TestList_WriteTo(t *testing.T) { var buf bytes.Buffer // TODO: Automatically get the name of the writer passed in, to avoid having to name - // everything. i.e. content.NewWriterName(string, io.Writer) - cw := content.NewWriter(&buf) + // everything. i.e. 
stream.NewWriterName(string, io.Writer) + cw := stream.NewWriter(&buf) w := NewYAMLWriter(cw) ctx := context.Background() // Happy case diff --git a/pkg/frame/writer.go b/pkg/frame/writer.go index 5a6f93fc..7ded38f1 100644 --- a/pkg/frame/writer.go +++ b/pkg/frame/writer.go @@ -4,8 +4,8 @@ import ( "context" "sync" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/tracing" "github.com/weaveworks/libgitops/pkg/util/limitedio" "go.opentelemetry.io/otel/trace" @@ -50,7 +50,7 @@ func (w *highlevelWriter) WriteFrame(ctx context.Context, frame []byte) error { } // Register the amount of (sanitized) bytes and call the underlying Writer - span.SetAttributes(content.SpanAttrByteContent(frame)...) + span.SetAttributes(stream.SpanAttrByteContent(frame)...) // Catch empty frames if len(frame) == 0 { @@ -67,10 +67,10 @@ func (w *highlevelWriter) WriteFrame(ctx context.Context, frame []byte) error { }).Register() } -func (w *highlevelWriter) ContentType() content.ContentType { return w.writer.ContentType() } +func (w *highlevelWriter) ContentType() stream.ContentType { return w.writer.ContentType() } func (w *highlevelWriter) Close(ctx context.Context) error { return closeWithTrace(ctx, w.writer, w) } // Just forward the metadata, don't do anything specific with it -func (w *highlevelWriter) ContentMetadata() content.Metadata { return w.writer.ContentMetadata() } +func (w *highlevelWriter) ContentMetadata() stream.Metadata { return w.writer.ContentMetadata() } diff --git a/pkg/frame/writer_delegate.go b/pkg/frame/writer_delegate.go index fa968e97..e2a3863f 100644 --- a/pkg/frame/writer_delegate.go +++ b/pkg/frame/writer_delegate.go @@ -4,10 +4,10 @@ import ( "context" "io" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" ) -func newDelegatingWriter(ct content.ContentType, w content.Writer) Writer { +func newDelegatingWriter(ct stream.ContentType, w stream.Writer) Writer { return &delegatingWriter{ // TODO: Register options? 
MetadataContainer: w.ContentMetadata().Clone().ToContainer(), @@ -18,9 +18,9 @@ func newDelegatingWriter(ct content.ContentType, w content.Writer) Writer { // delegatingWriter is an implementation of the Writer interface type delegatingWriter struct { - content.MetadataContainer - content.ContentTyped - w content.Writer + stream.MetadataContainer + stream.ContentTyped + w stream.Writer } func (w *delegatingWriter) WriteFrame(ctx context.Context, frame []byte) error { @@ -32,7 +32,7 @@ func (w *delegatingWriter) WriteFrame(ctx context.Context, frame []byte) error { func (w *delegatingWriter) Close(ctx context.Context) error { return w.w.WithContext(ctx).Close() } -func newErrWriter(ct content.ContentType, err error, meta content.Metadata) Writer { +func newErrWriter(ct stream.ContentType, err error, meta stream.Metadata) Writer { return &errWriter{ meta.Clone().ToContainer(), ct, @@ -42,8 +42,8 @@ func newErrWriter(ct content.ContentType, err error, meta content.Metadata) Writ } type errWriter struct { - content.MetadataContainer - content.ContentTyped + stream.MetadataContainer + stream.ContentTyped Closer err error } diff --git a/pkg/frame/writer_factory.go b/pkg/frame/writer_factory.go index 1191648c..e7a9f426 100644 --- a/pkg/frame/writer_factory.go +++ b/pkg/frame/writer_factory.go @@ -3,30 +3,30 @@ package frame import ( "io" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" "k8s.io/apimachinery/pkg/runtime/serializer/json" ) -func (defaultFactory) NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer { +func (defaultFactory) NewWriter(ct stream.ContentType, w stream.Writer, opts ...WriterOption) Writer { o := defaultWriterOptions().applyOptions(opts) var lowlevel Writer switch ct { - case content.ContentTypeYAML: - lowlevel = newDelegatingWriter(content.ContentTypeYAML, w.Wrap(func(underlying io.WriteCloser) io.Writer { + case stream.ContentTypeYAML: + lowlevel = newDelegatingWriter(stream.ContentTypeYAML, w.Wrap(func(underlying io.WriteCloser) io.Writer { // This writer always prepends a "---" before each frame return json.YAMLFramer.NewFrameWriter(underlying) })) - case content.ContentTypeJSON: + case stream.ContentTypeJSON: // JSON documents are self-framing; hence, no need to wrap the writer in any way - lowlevel = newDelegatingWriter(content.ContentTypeJSON, w) + lowlevel = newDelegatingWriter(stream.ContentTypeJSON, w) default: - return newErrWriter(ct, content.ErrUnsupportedContentType(ct), w.ContentMetadata()) + return newErrWriter(ct, stream.ErrUnsupportedContentType(ct), w.ContentMetadata()) } return newHighlevelWriter(lowlevel, o) } -func (defaultFactory) NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer { +func (defaultFactory) NewSingleWriter(ct stream.ContentType, w stream.Writer, opts ...SingleWriterOption) Writer { o := defaultSingleWriterOptions().applyOptions(opts) return newHighlevelWriter(newDelegatingWriter(ct, w), &writerOptions{ @@ -37,11 +37,11 @@ func (defaultFactory) NewSingleWriter(ct content.ContentType, w content.Writer, }) } -func (f defaultFactory) NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer { +func (f defaultFactory) NewRecognizingWriter(w stream.Writer, opts ...RecognizingWriterOption) Writer { o := defaultRecognizingWriterOptions().applyOptions(opts) // Recognize the content type using the given recognizer - r, ct, err := content.NewRecognizingWriter(w, o.Recognizer) + r, ct, err := 
stream.NewRecognizingWriter(w, o.Recognizer) if err != nil { return newErrWriter("", err, r.ContentMetadata()) } diff --git a/pkg/frame/writer_test.go b/pkg/frame/writer_test.go index 80281407..4a0fe977 100644 --- a/pkg/frame/writer_test.go +++ b/pkg/frame/writer_test.go @@ -7,21 +7,21 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" ) func TestNewWriter_Unrecognized(t *testing.T) { - fr := DefaultFactory().NewWriter(content.ContentType("doesnotexist"), content.NewWriter(io.Discard)) + fr := DefaultFactory().NewWriter(stream.ContentType("doesnotexist"), stream.NewWriter(io.Discard)) ctx := context.Background() err := fr.WriteFrame(ctx, make([]byte, 1)) - assert.ErrorIs(t, err, &content.UnsupportedContentTypeError{}) + assert.ErrorIs(t, err, &stream.UnsupportedContentTypeError{}) } func TestWriterShortBuffer(t *testing.T) { var buf bytes.Buffer w := &halfWriter{&buf} ctx := context.Background() - err := NewYAMLWriter(content.NewWriter(w)).WriteFrame(ctx, []byte("foo: bar")) + err := NewYAMLWriter(stream.NewWriter(w)).WriteFrame(ctx, []byte("foo: bar")) assert.Equal(t, io.ErrShortWrite, err) } diff --git a/pkg/serializer/comments.go b/pkg/serializer/comments.go index 397a1bc3..cb73fbe2 100644 --- a/pkg/serializer/comments.go +++ b/pkg/serializer/comments.go @@ -8,9 +8,9 @@ import ( "fmt" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments" + "github.com/weaveworks/libgitops/pkg/stream" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/kustomize/kyaml/yaml" @@ -27,10 +27,10 @@ var ( // tryToPreserveComments tries to save the original file data (base64-encoded) into an annotation. 
// This original file data can be used at encoding-time to preserve comments -func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct content.ContentType) { +func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct stream.ContentType) { // If the user opted into preserving comments and the format is YAML, proceed // If they didn't, return directly - if !(*d.opts.PreserveComments && ct == content.ContentTypeYAML) { + if !(*d.opts.PreserveComments && ct == stream.ContentTypeYAML) { return } @@ -50,8 +50,8 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw fr } // The user requested to preserve comments, but content type is not YAML, so log, sanitize and return - if fw.ContentType() != content.ContentTypeYAML { - logrus.Debugf("Asked to preserve comments, but content.ContentType is not YAML, so ignoring") + if fw.ContentType() != stream.ContentTypeYAML { + logrus.Debugf("Asked to preserve comments, but stream.ContentType is not YAML, so ignoring") // Normal encoding without the annotation (so it doesn't leak by accident) return noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, fw, obj)) diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index c2e1a85e..66a304ef 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -6,8 +6,8 @@ import ( "io" "reflect" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -41,7 +41,7 @@ type DecodingOptions struct { DecodeListElements *bool // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. - // Only applicable to content.ContentTypeYAML framers. + // Only applicable to stream.ContentTypeYAML framers. // Using any other framer will be silently ignored. Usage of this option also requires setting // the PreserveComments in EncodingOptions, too. (Default: false) PreserveComments *bool @@ -150,7 +150,7 @@ func (d *decoder) Decode(fr frame.Reader) (runtime.Object, error) { return d.decode(doc, nil, fr.ContentType()) } -func (d *decoder) decode(doc []byte, into runtime.Object, ct content.ContentType) (runtime.Object, error) { +func (d *decoder) decode(doc []byte, into runtime.Object, ct stream.ContentType) (runtime.Object, error) { // If the scheme doesn't recognize a v1.List, and we enabled opts.DecodeListElements, // make the scheme able to decode the v1.List automatically if *d.opts.DecodeListElements && !d.scheme.Recognizes(listGVK) { @@ -263,7 +263,7 @@ func (d *decoder) DecodeAll(fr frame.Reader) ([]runtime.Object, error) { } // decodeUnknown decodes bytes of a certain content type into a returned *runtime.Unknown object -func (d *decoder) decodeUnknown(doc []byte, ct content.ContentType) (runtime.Object, error) { +func (d *decoder) decodeUnknown(doc []byte, ct stream.ContentType) (runtime.Object, error) { // Do a DecodeInto the new pointer to the object we've got. The resulting into object is // also returned. 
// The content type isn't really used here, as runtime.Unknown will never implement @@ -300,7 +300,7 @@ func (d *decoder) handleDecodeError(doc []byte, origErr error) error { return origErr } -func (d *decoder) extractNestedObjects(obj runtime.Object, ct content.ContentType) ([]runtime.Object, error) { +func (d *decoder) extractNestedObjects(obj runtime.Object, ct stream.ContentType) ([]runtime.Object, error) { // If we didn't ask for list-unwrapping functionality, return directly if !*d.opts.DecodeListElements { return []runtime.Object{obj}, nil diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index 5e01efee..c61be904 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -4,8 +4,8 @@ import ( "context" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -16,7 +16,7 @@ type EncodingOptions struct { // TODO: Fix that sometimes omitempty fields aren't respected Pretty *bool // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. - // Only applicable to content.ContentTypeYAML framers. + // Only applicable to stream.ContentTypeYAML framers. // Using any other framer will be silently ignored. Usage of this option also requires setting // the PreserveComments in DecodingOptions, too. (Default: false) // TODO: Make this a BestEffort & Strict mode @@ -75,7 +75,7 @@ func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder { } // Encode encodes the given objects and writes them to the specified frame.Writer. -// The frame.Writer specifies the content.ContentType. This encoder will automatically convert any +// The frame.Writer specifies the stream.ContentType. This encoder will automatically convert any // internal object given to the preferred external groupversion. No conversion will happen // if the given object is of an external version. // TODO: This should automatically convert to the preferred version @@ -106,13 +106,13 @@ func (e *encoder) Encode(fw frame.Writer, objs ...runtime.Object) error { // EncodeForGroupVersion encodes the given object for the specific groupversion. If the object // is not of that version currently it will try to convert. The output bytes are written to the -// frame.Writer. The frame.Writer specifies the content.ContentType. +// frame.Writer. The frame.Writer specifies the stream.ContentType. 
func (e *encoder) EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error { // Get the serializer for the media type serializerInfo, ok := runtime.SerializerInfoForMediaType(e.codecs.SupportedMediaTypes(), fw.ContentType().String()) if !ok { // TODO: Also mention what content types _are_ supported here - return content.ErrUnsupportedContentType(fw.ContentType()) + return stream.ErrUnsupportedContentType(fw.ContentType()) } // Choose the pretty or non-pretty one @@ -125,7 +125,7 @@ func (e *encoder) EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv if serializerInfo.PrettySerializer != nil { encoder = serializerInfo.PrettySerializer } else { - logrus.Debugf("PrettySerializer for content.ContentType %s is nil, falling back to Serializer.", fw.ContentType()) + logrus.Debugf("PrettySerializer for stream.ContentType %s is nil, falling back to Serializer.", fw.ContentType()) } } diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index de4be7ec..316fcfa1 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -49,14 +49,14 @@ type schemeAndCodec struct { // to a frame.Writer. type Encoder interface { // Encode encodes the given objects and writes them to the specified frame.Writer. - // The frame.Writer specifies the content.ContentType. This encoder will automatically convert any + // The frame.Writer specifies the stream.ContentType. This encoder will automatically convert any // internal object given to the preferred external groupversion. No conversion will happen // if the given object is of an external version. Encode(fw frame.Writer, obj ...runtime.Object) error // EncodeForGroupVersion encodes the given object for the specific groupversion. If the object // is not of that version currently it will try to convert. The output bytes are written to the - // frame.Writer. The frame.Writer specifies the content.ContentType. + // frame.Writer. The frame.Writer specifies the stream.ContentType. 
EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error } diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go index c5f6193c..0ce45ef1 100644 --- a/pkg/serializer/serializer_test.go +++ b/pkg/serializer/serializer_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" + "github.com/weaveworks/libgitops/pkg/stream" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" @@ -358,18 +358,18 @@ func TestEncode(t *testing.T) { newCRDObj := &CRDNewVersion{OtherString: "foobar"} tests := []struct { name string - ct content.ContentType + ct stream.ContentType objs []runtime.Object want []byte wantErr error }{ - {"simple yaml", content.ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, nil}, - {"complex yaml", content.ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, nil}, - {"both simple and complex yaml", content.ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, nil}, - {"simple json", content.ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, nil}, - {"complex json", content.ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, nil}, - {"old CRD yaml", content.ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, nil}, - {"new CRD yaml", content.ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, nil}, + {"simple yaml", stream.ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, nil}, + {"complex yaml", stream.ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, nil}, + {"both simple and complex yaml", stream.ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, nil}, + {"simple json", stream.ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, nil}, + {"complex json", stream.ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, nil}, + {"old CRD yaml", stream.ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, nil}, + {"new CRD yaml", stream.ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, nil}, //{"no-conversion simple", defaultEncoder, &runtimetest.ExternalSimple{TestString: "foo"}, simpleJSON, false}, //{"support internal", defaultEncoder, []runtime.Object{simpleObj}, []byte(`{"testString":"foo"}` + "\n"), false}, } @@ -377,7 +377,7 @@ func TestEncode(t *testing.T) { for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { var buf bytes.Buffer - cw := content.ToBuffer(&buf, content.WithContentType(rt.ct)) + cw := stream.ToBuffer(&buf, stream.WithContentType(rt.ct)) err := defaultEncoder.Encode(frame.NewRecognizingWriter(cw), rt.objs...) 
assert.ErrorIs(t, err, rt.wantErr) assert.Equal(t, string(rt.want), buf.String()) @@ -415,7 +415,7 @@ func TestDecode(t *testing.T) { obj, err := ourserializer.Decoder( WithDefaultsDecode(rt.doDefaulting), WithConvertToHubDecode(rt.doConversion), - ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) + ).Decode(frame.NewYAMLReader(stream.FromBytes(rt.data))) assert.Equal(t, err != nil, rt.wantErr) assert.Equal(t, rt.want, obj) }) @@ -452,7 +452,7 @@ func TestDecodeInto(t *testing.T) { actual := ourserializer.Decoder( WithDefaultsDecode(rt.doDefaulting), - ).DecodeInto(frame.NewYAMLReader(content.FromBytes(rt.data)), rt.obj) + ).DecodeInto(frame.NewYAMLReader(stream.FromBytes(rt.data)), rt.obj) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -493,7 +493,7 @@ func TestDecodeAll(t *testing.T) { objs, actual := ourserializer.Decoder( WithDefaultsDecode(rt.doDefaulting), WithListElementsDecoding(rt.listSplit), - ).DecodeAll(frame.NewYAMLReader(content.FromBytes(rt.data))) + ).DecodeAll(frame.NewYAMLReader(stream.FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -535,7 +535,7 @@ func TestDecodeUnknown(t *testing.T) { t.Run(rt.name, func(t2 *testing.T) { obj, actual := ourserializer.Decoder( WithUnknownDecode(rt.unknown), - ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) + ).Decode(frame.NewYAMLReader(stream.FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -550,15 +550,15 @@ func TestRoundtrip(t *testing.T) { tests := []struct { name string data []byte - ct content.ContentType + ct stream.ContentType gv *schema.GroupVersion // use a specific groupversion if set. if nil, then use the default Encode }{ - {"simple yaml", oneSimple, content.ContentTypeYAML, nil}, - {"complex yaml", oneComplex, content.ContentTypeYAML, nil}, - {"simple json", simpleJSON, content.ContentTypeJSON, nil}, - {"complex json", complexJSON, content.ContentTypeJSON, nil}, - {"crd with objectmeta & comments", oldCRD, content.ContentTypeYAML, &ext1gv}, // encode as v1alpha1 - {"unknown object", unrecognizedGVK, content.ContentTypeYAML, nil}, + {"simple yaml", oneSimple, stream.ContentTypeYAML, nil}, + {"complex yaml", oneComplex, stream.ContentTypeYAML, nil}, + {"simple json", simpleJSON, stream.ContentTypeJSON, nil}, + {"complex json", complexJSON, stream.ContentTypeJSON, nil}, + {"crd with objectmeta & comments", oldCRD, stream.ContentTypeYAML, &ext1gv}, // encode as v1alpha1 + {"unknown object", unrecognizedGVK, stream.ContentTypeYAML, nil}, // TODO: Maybe an unit test (case) for a type with ObjectMeta embedded as a pointer being nil // TODO: Make sure that the Encode call (with comments support) doesn't mutate the object state // i.e. doesn't remove the annotation after use so multiple similar encode calls work. 
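For orientation while reviewing the table above: a minimal sketch of the roundtrip these cases drive after the content -> stream rename, assuming the test file's package-level fixtures (`ourserializer`, `defaultEncoder`) and the imports already present in serializer_test.go; it is an illustrative sketch, not a definitive helper.

func roundtripYAMLSketch(t *testing.T, data []byte) []byte {
	// Decode a single YAML document: raw bytes -> stream.Reader -> YAML frame.Reader.
	obj, err := ourserializer.Decoder(WithCommentsDecode(true)).Decode(
		frame.NewYAMLReader(stream.FromBytes(data)))
	if err != nil {
		t.Fatal(err)
	}

	// Encode it back: the writer is tagged with a content type so the
	// recognizing frame.Writer can pick YAML framing from that metadata.
	var buf bytes.Buffer
	cw := stream.ToBuffer(&buf, stream.WithContentType(stream.ContentTypeYAML))
	if err := defaultEncoder.Encode(frame.NewRecognizingWriter(cw), obj); err != nil {
		t.Fatal(err)
	}
	return buf.Bytes()
}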
@@ -570,13 +570,13 @@ func TestRoundtrip(t *testing.T) { WithConvertToHubDecode(true), WithCommentsDecode(true), WithUnknownDecode(true), - ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) + ).Decode(frame.NewYAMLReader(stream.FromBytes(rt.data))) if err != nil { t2.Errorf("unexpected decode error: %v", err) return } var buf bytes.Buffer - cw := content.ToBuffer(&buf, content.WithContentType(rt.ct)) + cw := stream.ToBuffer(&buf, stream.WithContentType(rt.ct)) if rt.gv == nil { err = defaultEncoder.Encode(frame.NewRecognizingWriter(cw), obj) } else { @@ -692,13 +692,13 @@ testString: bar func TestListRoundtrip(t *testing.T) { objs, err := ourserializer.Decoder( WithCommentsDecode(true), - ).DecodeAll(frame.NewYAMLReader(content.FromBytes(testList))) + ).DecodeAll(frame.NewYAMLReader(stream.FromBytes(testList))) if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) - if err := defaultEncoder.Encode(frame.NewWriter(content.ContentTypeYAML, buf), objs...); err != nil { + if err := defaultEncoder.Encode(frame.NewWriter(stream.ContentTypeYAML, buf), objs...); err != nil { t.Fatal(err) } actual := buf.Bytes() diff --git a/pkg/storage/format.go b/pkg/storage/format.go index e2bdb2c1..066bd3bf 100644 --- a/pkg/storage/format.go +++ b/pkg/storage/format.go @@ -1,16 +1,16 @@ package storage -import "github.com/weaveworks/libgitops/pkg/content" +import "github.com/weaveworks/libgitops/pkg/stream" // ContentTypes describes the connection between // file extensions and a content types. -var ContentTypes = map[string]content.ContentType{ - ".json": content.ContentTypeJSON, - ".yaml": content.ContentTypeYAML, - ".yml": content.ContentTypeYAML, +var ContentTypes = map[string]stream.ContentType{ + ".json": stream.ContentTypeJSON, + ".yaml": stream.ContentTypeYAML, + ".yml": stream.ContentTypeYAML, } -func extForContentType(wanted content.ContentType) string { +func extForContentType(wanted stream.ContentType) string { for ext, ct := range ContentTypes { if ct == wanted { return ext diff --git a/pkg/storage/mappedrawstorage.go b/pkg/storage/mappedrawstorage.go index 633da8ef..16f64bbd 100644 --- a/pkg/storage/mappedrawstorage.go +++ b/pkg/storage/mappedrawstorage.go @@ -8,7 +8,7 @@ import ( "sync" log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util" ) @@ -133,7 +133,7 @@ func (r *GenericMappedRawStorage) Checksum(key ObjectKey) (string, error) { return checksumFromModTime(path) } -func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct content.ContentType) { +func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct stream.ContentType) { if file, err := r.realPath(key); err == nil { ct = ContentTypes[filepath.Ext(file)] // Retrieve the correct format based on the extension } diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go index aeddd06c..546093d7 100644 --- a/pkg/storage/rawstorage.go +++ b/pkg/storage/rawstorage.go @@ -9,8 +9,8 @@ import ( "strconv" "strings" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/runtime" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -36,7 +36,7 @@ type RawStorage interface { // If the resource does not exist, it returns ErrNotFound. Checksum(key ObjectKey) (string, error) // ContentType returns the content type of the contents of the resource indicated by key. 
- ContentType(key ObjectKey) content.ContentType + ContentType(key ObjectKey) stream.ContentType // WatchDir returns the path for Watchers to watch changes in. WatchDir() string @@ -45,7 +45,7 @@ type RawStorage interface { GetKey(path string) (ObjectKey, error) } -func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct content.ContentType) RawStorage { +func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct stream.ContentType) RawStorage { ext := extForContentType(ct) if ext == "" { panic("Invalid content type") @@ -65,7 +65,7 @@ func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct content.Content type GenericRawStorage struct { dir string gv schema.GroupVersion - ct content.ContentType + ct stream.ContentType ext string } @@ -175,7 +175,7 @@ func (r *GenericRawStorage) Checksum(key ObjectKey) (string, error) { return checksumFromModTime(r.keyPath(key)) } -func (r *GenericRawStorage) ContentType(_ ObjectKey) content.ContentType { +func (r *GenericRawStorage) ContentType(_ ObjectKey) stream.ContentType { return r.ct } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 9cc31d11..cc82a49e 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -7,11 +7,11 @@ import ( "fmt" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/filter" "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/stream" patchutil "github.com/weaveworks/libgitops/pkg/util/patch" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" @@ -28,7 +28,7 @@ var ( ) type ReadStorage interface { - // Get returns a new Object for the resource at the specified kind/uid path, based on the file content. + // Get returns a new Object for the resource at the specified kind/uid path, based on the file stream. // If the resource referred to by the given ObjectKey does not exist, Get returns ErrNotFound. 
Get(key ObjectKey) (runtime.Object, error) @@ -151,7 +151,7 @@ func (s *GenericStorage) GetMeta(key ObjectKey) (runtime.PartialObject, error) { // TODO: Make sure we don't save a partial object func (s *GenericStorage) write(key ObjectKey, obj runtime.Object) error { // Set the content type based on the format given by the RawStorage, but default to JSON - contentType := content.ContentTypeJSON + contentType := stream.ContentTypeJSON if ct := s.raw.ContentType(key); len(ct) != 0 { contentType = ct } @@ -362,7 +362,7 @@ func (s *GenericStorage) decode(key ObjectKey, objBytes []byte) (runtime.Object, logrus.Infof("Decoding with content type %s", ct) obj, err := s.serializer.Decoder( serializer.WithConvertToHubDecode(isInternal), - ).Decode(frame.NewSingleReader(ct, content.FromBytes(objBytes))) + ).Decode(frame.NewSingleReader(ct, stream.FromBytes(objBytes))) // TODO: Multi-frame support if err != nil { return nil, err @@ -381,7 +381,7 @@ func (s *GenericStorage) decode(key ObjectKey, objBytes []byte) (runtime.Object, func (s *GenericStorage) decodeMeta(key ObjectKey, frame []byte) (runtime.PartialObject, error) { gvk := key.GetGVK() - partobjs, err := DecodePartialObjects(content.FromBytes(frame), s.serializer.Scheme(), false, &gvk) + partobjs, err := DecodePartialObjects(stream.FromBytes(frame), s.serializer.Scheme(), false, &gvk) if err != nil { return nil, err } @@ -417,7 +417,7 @@ func (s *GenericStorage) walkKind(kind KindKey, fn func(key ObjectKey, content [ // DecodePartialObjects reads any set of frames from the given ReadCloser, decodes the frames into // PartialObjects, validates that the decoded objects are known to the scheme, and optionally sets a default // group -func DecodePartialObjects(r content.Reader, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) { +func DecodePartialObjects(r stream.Reader, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) { fr := frame.NewYAMLReader(r) ctx := context.TODO() diff --git a/pkg/storage/transaction/git.go b/pkg/storage/transaction/git.go index 5bb2f26d..70e8a4cb 100644 --- a/pkg/storage/transaction/git.go +++ b/pkg/storage/transaction/git.go @@ -6,11 +6,11 @@ import ( "strings" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/gitdir" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/stream" "github.com/weaveworks/libgitops/pkg/util" "github.com/weaveworks/libgitops/pkg/util/watcher" ) @@ -145,7 +145,7 @@ func computeMappings(dir string, s storage.Storage) (map[storage.ObjectKey]strin // can automatically subscribe to changes of objects between versions. 
m := map[storage.ObjectKey]string{} for _, file := range files { - partObjs, err := storage.DecodePartialObjects(content.FromFile(file), s.Serializer().Scheme(), false, nil) + partObjs, err := storage.DecodePartialObjects(stream.FromFile(file), s.Serializer().Scheme(), false, nil) if err != nil { logrus.Errorf("couldn't decode %q into a partial object: %v", file, err) continue diff --git a/pkg/content/constructors.go b/pkg/stream/constructors.go similarity index 94% rename from pkg/content/constructors.go rename to pkg/stream/constructors.go index 18df7e2a..2c58b211 100644 --- a/pkg/content/constructors.go +++ b/pkg/stream/constructors.go @@ -1,4 +1,4 @@ -package content +package stream import ( "bytes" @@ -7,7 +7,7 @@ import ( "strings" "testing/iotest" - "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/stream/metadata" ) // newErrReader makes a Reader implementation that only returns the given error on Read() @@ -55,7 +55,7 @@ func FromFile(filePath string, opts ...metadata.HeaderOption) Reader { return NewReader(f, opts...) } -// FromBytes returns an io.Reader from the given byte content. +// FromBytes returns an io.Reader from the given byte stream. func FromBytes(content []byte, opts ...metadata.HeaderOption) Reader { // Register the Content-Length opts = append(opts, metadata.WithContentLength(int64(len(content)))) @@ -63,7 +63,7 @@ func FromBytes(content []byte, opts ...metadata.HeaderOption) Reader { return NewReader(bytes.NewReader(content), opts...) } -// FromString returns an io.Reader from the given string content. +// FromString returns an io.Reader from the given string stream. func FromString(content string, opts ...metadata.HeaderOption) Reader { // Register the Content-Length opts = append(opts, metadata.WithContentLength(int64(len(content)))) diff --git a/pkg/content/errors.go b/pkg/stream/errors.go similarity index 98% rename from pkg/content/errors.go rename to pkg/stream/errors.go index 164e3c89..e81a13f9 100644 --- a/pkg/content/errors.go +++ b/pkg/stream/errors.go @@ -1,4 +1,4 @@ -package content +package stream import ( "fmt" diff --git a/pkg/content/interfaces.go b/pkg/stream/interfaces.go similarity index 96% rename from pkg/content/interfaces.go rename to pkg/stream/interfaces.go index a9d85409..7f619d2a 100644 --- a/pkg/content/interfaces.go +++ b/pkg/stream/interfaces.go @@ -1,16 +1,16 @@ -package content +package stream import ( "context" "fmt" "io" - "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/stream/metadata" ) var _ fmt.Stringer = ContentType("") -// ContentType specifies the content type of some content. +// ContentType specifies the content type of some stream. // Ideally, a standard MIME notation like "application/json" shall be used. 
type ContentType string diff --git a/pkg/content/metadata.go b/pkg/stream/metadata.go similarity index 97% rename from pkg/content/metadata.go rename to pkg/stream/metadata.go index a17f3f16..a5e192d1 100644 --- a/pkg/content/metadata.go +++ b/pkg/stream/metadata.go @@ -1,11 +1,11 @@ -package content +package stream import ( "encoding/json" "net/textproto" "net/url" - "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/stream/metadata" ) // Metadata is the interface that's common to contentMetadataOptions and a wrapper diff --git a/pkg/content/metadata/metadata.go b/pkg/stream/metadata/metadata.go similarity index 93% rename from pkg/content/metadata/metadata.go rename to pkg/stream/metadata/metadata.go index ec565647..25ca55bd 100644 --- a/pkg/content/metadata/metadata.go +++ b/pkg/stream/metadata/metadata.go @@ -1,5 +1,5 @@ // Metadata contains an interface to work with HTTP-like headers carrying metadata about -// some content. +// some stream. package metadata import ( @@ -13,20 +13,20 @@ import ( /* Metadata origin in the system by default: - content.FromFile -> content.Reader + stream.FromFile -> stream.Reader - X-Content-Location - Content-Length - content.FromBytes -> content.Reader + stream.FromBytes -> stream.Reader - Content-Length - content.FromString -> content.Reader + stream.FromString -> stream.Reader - Content-Length - content.ToFile -> content.Writer + stream.ToFile -> stream.Writer - X-Content-Location - content.ToBuffer -> content.Writer + stream.ToBuffer -> stream.Writer frame.NewYAMLReader -> frame.Reader - Content-Type => YAML diff --git a/pkg/content/metadata/metadata_test.go b/pkg/stream/metadata/metadata_test.go similarity index 100% rename from pkg/content/metadata/metadata_test.go rename to pkg/stream/metadata/metadata_test.go diff --git a/pkg/content/reader.go b/pkg/stream/reader.go similarity index 98% rename from pkg/content/reader.go rename to pkg/stream/reader.go index e417096b..7470584f 100644 --- a/pkg/content/reader.go +++ b/pkg/stream/reader.go @@ -1,4 +1,4 @@ -package content +package stream import ( "context" @@ -6,7 +6,7 @@ import ( "io" "os" - "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/stream/metadata" "github.com/weaveworks/libgitops/pkg/tracing" "github.com/weaveworks/libgitops/pkg/util/compositeio" "github.com/weaveworks/libgitops/pkg/util/limitedio" diff --git a/pkg/content/reader_test.go b/pkg/stream/reader_test.go similarity index 98% rename from pkg/content/reader_test.go rename to pkg/stream/reader_test.go index 98b6aea3..d1cdb0de 100644 --- a/pkg/content/reader_test.go +++ b/pkg/stream/reader_test.go @@ -1,4 +1,4 @@ -package content +package stream import ( "bytes" diff --git a/pkg/content/recognizing.go b/pkg/stream/recognizing.go similarity index 98% rename from pkg/content/recognizing.go rename to pkg/stream/recognizing.go index ed3d198b..81e07a6a 100644 --- a/pkg/content/recognizing.go +++ b/pkg/stream/recognizing.go @@ -1,4 +1,4 @@ -package content +package stream import ( "bufio" @@ -8,7 +8,7 @@ import ( "io" "path/filepath" - "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/stream/metadata" "github.com/weaveworks/libgitops/pkg/tracing" "github.com/weaveworks/libgitops/pkg/util/compositeio" "go.opentelemetry.io/otel/trace" diff --git a/pkg/content/recognizing_reader_test.go b/pkg/stream/recognizing_reader_test.go similarity index 98% rename from pkg/content/recognizing_reader_test.go rename to 
pkg/stream/recognizing_reader_test.go index 804f237a..46cea7c8 100644 --- a/pkg/content/recognizing_reader_test.go +++ b/pkg/stream/recognizing_reader_test.go @@ -1,4 +1,4 @@ -package content +package stream import ( "bufio" diff --git a/pkg/content/recognizing_test.go b/pkg/stream/recognizing_test.go similarity index 96% rename from pkg/content/recognizing_test.go rename to pkg/stream/recognizing_test.go index 0350a6c7..57697fb9 100644 --- a/pkg/content/recognizing_test.go +++ b/pkg/stream/recognizing_test.go @@ -1,10 +1,10 @@ -package content +package stream import ( "testing" "github.com/stretchr/testify/assert" - "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/stream/metadata" ) func Test_negotiateAccept(t *testing.T) { diff --git a/pkg/content/segment_reader.go b/pkg/stream/segment_reader.go similarity index 99% rename from pkg/content/segment_reader.go rename to pkg/stream/segment_reader.go index 62f408ce..da6d9a9b 100644 --- a/pkg/content/segment_reader.go +++ b/pkg/stream/segment_reader.go @@ -1,4 +1,4 @@ -package content +package stream import ( "context" diff --git a/pkg/content/tracing.go b/pkg/stream/tracing.go similarity index 98% rename from pkg/content/tracing.go rename to pkg/stream/tracing.go index f11eec83..8bae8cc1 100644 --- a/pkg/content/tracing.go +++ b/pkg/stream/tracing.go @@ -1,4 +1,4 @@ -package content +package stream import "go.opentelemetry.io/otel/attribute" diff --git a/pkg/content/writer.go b/pkg/stream/writer.go similarity index 97% rename from pkg/content/writer.go rename to pkg/stream/writer.go index 167346ae..2febd917 100644 --- a/pkg/content/writer.go +++ b/pkg/stream/writer.go @@ -1,10 +1,10 @@ -package content +package stream import ( "context" "io" - "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/stream/metadata" "github.com/weaveworks/libgitops/pkg/tracing" "github.com/weaveworks/libgitops/pkg/util/compositeio" "go.opentelemetry.io/otel/trace" diff --git a/pkg/util/patch/patch.go b/pkg/util/patch/patch.go index 37ed6228..23c26ec5 100644 --- a/pkg/util/patch/patch.go +++ b/pkg/util/patch/patch.go @@ -5,10 +5,10 @@ import ( "fmt" "io/ioutil" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/stream" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/strategicpatch" ) @@ -33,7 +33,7 @@ func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) encoder := p.serializer.Encoder() old := new.DeepCopyObject().(runtime.Object) - if err = encoder.Encode(frame.NewJSONWriter(content.NewWriter(&oldBytes)), old); err != nil { + if err = encoder.Encode(frame.NewJSONWriter(stream.NewWriter(&oldBytes)), old); err != nil { return } @@ -41,7 +41,7 @@ func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) return } - if err = encoder.Encode(frame.NewJSONWriter(content.NewWriter(&newBytes)), new); err != nil { + if err = encoder.Encode(frame.NewJSONWriter(stream.NewWriter(&newBytes)), new); err != nil { return } @@ -91,13 +91,13 @@ func (p *patcher) ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVer // with the serializer so it conforms to a runtime.Object // TODO: Just use encoding/json.Indent here instead? 
func (p *patcher) serializerEncode(input []byte) ([]byte, error) { - obj, err := p.serializer.Decoder().Decode(frame.NewJSONReader(content.FromBytes(input))) + obj, err := p.serializer.Decoder().Decode(frame.NewJSONReader(stream.FromBytes(input))) if err != nil { return nil, err } var result bytes.Buffer - if err := p.serializer.Encoder().Encode(frame.NewJSONWriter(content.NewWriter(&result)), obj); err != nil { + if err := p.serializer.Encoder().Encode(frame.NewJSONWriter(stream.NewWriter(&result)), obj); err != nil { return nil, err } diff --git a/pkg/util/patch/patch_test.go b/pkg/util/patch/patch_test.go index 2453d7e3..af4a56ee 100644 --- a/pkg/util/patch/patch_test.go +++ b/pkg/util/patch/patch_test.go @@ -6,9 +6,9 @@ import ( api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" - "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/runtime" + "github.com/weaveworks/libgitops/pkg/stream" ) var ( @@ -54,7 +54,7 @@ func TestApplyPatch(t *testing.T) { if err != nil { t.Fatal(err) } - frameReader := frame.NewJSONReader(content.FromBytes(result)) + frameReader := frame.NewJSONReader(stream.FromBytes(result)) if err := scheme.Serializer.Decoder().DecodeInto(frameReader, &api.Car{}); err != nil { t.Fatal(err) } From f82afc8a357dc77c9673f7ff9a72796c5c6e35a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 30 Jul 2021 13:49:57 +0300 Subject: [PATCH 17/19] Add stream import --- cmd/sample-app/main.go | 1 + cmd/sample-watch/main.go | 1 + 2 files changed, 2 insertions(+) diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go index 692aa074..18e9c309 100644 --- a/cmd/sample-app/main.go +++ b/cmd/sample-app/main.go @@ -16,6 +16,7 @@ import ( "github.com/weaveworks/libgitops/pkg/logs" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/stream" ) var manifestDirFlag = pflag.String("data-dir", "/tmp/libgitops/manifest", "Where to store the YAML files") diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go index d8cc6b95..c6e4738c 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -15,6 +15,7 @@ import ( "github.com/weaveworks/libgitops/pkg/logs" "github.com/weaveworks/libgitops/pkg/storage/watch" "github.com/weaveworks/libgitops/pkg/storage/watch/update" + "github.com/weaveworks/libgitops/pkg/stream" ) var watchDirFlag = pflag.String("watch-dir", "/tmp/libgitops/watch", "Where to watch for YAML/JSON manifests") From db949f86002dbe0866079017dd810f07889f672e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 30 Jul 2021 15:56:18 +0300 Subject: [PATCH 18/19] Add some helpers in the sanitize package, rename prior to original, and add an example. 
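For orientation before the diff itself, here is a minimal, hedged sketch of how the helpers added in this commit are meant to be called. The function names, signatures, and the compact-JSON result are taken from the sanitize.go and sanitize_test.go hunks below; the YAML input/original strings are purely illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/weaveworks/libgitops/pkg/frame/sanitize"
)

func main() {
	ctx := context.Background()

	// Compact-print a single JSON frame. There is no original revision to
	// preserve styles from, hence nil. Per the TestJSON added below, the
	// result is `{"foo":true}` followed by a newline.
	out, err := sanitize.JSON(ctx, []byte(` { "foo" : true } `), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", out)

	// Re-format a YAML frame while using the original revision of the same
	// document to copy comments over and to infer the sequence indent style
	// (the default behavior shown in the Example test below). These inputs
	// are illustrative only.
	current := []byte("items:\n- a\n- b\n")
	original := []byte("# my list\nitems:\n  - a\n")
	sanitized, err := sanitize.YAML(ctx, current, original)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(sanitized))
}
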
--- pkg/frame/sanitize/example_sanitize_test.go | 137 ++++++++++++++++++++ pkg/frame/sanitize/sanitize.go | 121 ++++++++++------- pkg/frame/sanitize/sanitize_test.go | 9 +- 3 files changed, 221 insertions(+), 46 deletions(-) create mode 100644 pkg/frame/sanitize/example_sanitize_test.go diff --git a/pkg/frame/sanitize/example_sanitize_test.go b/pkg/frame/sanitize/example_sanitize_test.go new file mode 100644 index 00000000..1561ed51 --- /dev/null +++ b/pkg/frame/sanitize/example_sanitize_test.go @@ -0,0 +1,137 @@ +package sanitize + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/pmezard/go-difflib/difflib" + "sigs.k8s.io/yaml" +) + +const testdata = `--- +# root + +apiVersion: sample.com/v1 # bla +# hello +items: +# moveup + - item1 # hello + # bla + - item2 # hi + +kind: MyList # foo +` + +type List struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Items []string `json:"items"` +} + +func Example() { + var list List + original := []byte(testdata) + if err := yaml.UnmarshalStrict(original, &list); err != nil { + log.Fatal(err) + } + list.Items = append(list.Items, "item3") + + out, err := yaml.Marshal(list) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Without sanitation:\n---\n%s---\n", out) + fmt.Printf("Diff without sanitation:\n---\n%s---\n", doDiff(testdata, string(out))) + + ctx := context.Background() + sanitized, err := YAML(ctx, out, original) + if err != nil { + log.Fatal(err) + } + + fmt.Printf("With sanitation:\n---\n%s---\n", sanitized) + fmt.Printf("Diff with sanitation:\n---\n%s---", doDiff(testdata, string(sanitized))) + + // Output: + // Without sanitation: + // --- + // apiVersion: sample.com/v1 + // items: + // - item1 + // - item2 + // - item3 + // kind: MyList + // --- + // Diff without sanitation: + // --- + // --- Expected + // +++ Actual + // @@ -1,13 +1,7 @@ + // ---- + // -# root + // +apiVersion: sample.com/v1 + // +items: + // +- item1 + // +- item2 + // +- item3 + // +kind: MyList + // -apiVersion: sample.com/v1 # bla + // -# hello + // -items: + // -# moveup + // - - item1 # hello + // - # bla + // - - item2 # hi + // - + // -kind: MyList # foo + // - + // --- + // With sanitation: + // --- + // # root + // apiVersion: sample.com/v1 # bla + // # hello + // items: + // # moveup + // - item1 # hello + // # bla + // - item2 # hi + // - item3 + // kind: MyList # foo + // --- + // Diff with sanitation: + // --- + // --- Expected + // +++ Actual + // @@ -1,4 +1,2 @@ + // ---- + // # root + // - + // apiVersion: sample.com/v1 # bla + // @@ -6,7 +4,7 @@ + // items: + // -# moveup + // + # moveup + // - item1 # hello + // - # bla + // + # bla + // - item2 # hi + // - + // + - item3 + // kind: MyList # foo + // --- +} + +func doDiff(a, b string) string { + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(a), + B: difflib.SplitLines(b), + FromFile: "Expected", + ToFile: "Actual", + Context: 1, + }) + // Workaround that gofmt is removing the trailing spaces on an "output testing line" + return strings.ReplaceAll(diff, "\n \n", "\n") +} diff --git a/pkg/frame/sanitize/sanitize.go b/pkg/frame/sanitize/sanitize.go index 398e1baf..f2905fcc 100644 --- a/pkg/frame/sanitize/sanitize.go +++ b/pkg/frame/sanitize/sanitize.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "strings" "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments" @@ -14,18 +13,77 @@ import ( "sigs.k8s.io/kustomize/kyaml/yaml" ) +// JSON sanitizes JSON data in "current" using the 
NewJSONYAML() sanitizer with the given +// options. Optionally, "original" data can be used to preserve earlier styles. +func JSON(ctx context.Context, current, original []byte, opts ...JSONYAMLOption) ([]byte, error) { + return Sanitize(ctx, NewJSONYAML(opts...), stream.ContentTypeJSON, current, original) +} + +// YAML sanitizes YAML data in "current" using the NewJSONYAML() sanitizer with the given +// options. Optionally, "original" data can be used to preserve earlier styles, e.g. copy +// over comments and remember the sequence indentation style. +func YAML(ctx context.Context, current, original []byte, opts ...JSONYAMLOption) ([]byte, error) { + return Sanitize(ctx, NewJSONYAML(opts...), stream.ContentTypeYAML, current, original) +} + +// Sanitize sanitizes the "current" frame using the Sanitizer s, for the given ContentType if supported. +// If original is non-nil, it'll be used to merge the "current" frame with information from the original, +// for YAML this e.g. means copying comments and remembering the sequence indentation style. +func Sanitize(ctx context.Context, s Sanitizer, ct stream.ContentType, current, original []byte) ([]byte, error) { + if original != nil { + ctx = WithOriginalData(ctx, original) + } + return IfSupported(ctx, s, ct, current) +} + +// IfSupported calls the Sanitizer.Sanitize function using the given Sanitizer if the content type +// is supported. If the content type is not supported, the frame is returned as-is, with no error. +func IfSupported(ctx context.Context, s Sanitizer, ct stream.ContentType, frame []byte) ([]byte, error) { + // If the content type isn't supported, nothing to do + if s == nil || !s.SupportedContentTypes().Has(ct) { + return frame, nil + } + return s.Sanitize(ctx, ct, frame) +} + +// WithOriginalData registers the given frame with the context such that the frame can be used +// as "original data" when sanitizing. Prior data can be used to copy over YAML comments +// automatically from the original data, remember the key order, sequence indentation level, etc. +func WithOriginalData(ctx context.Context, original []byte) context.Context { + return context.WithValue(ctx, originalDataKey, original) +} + +// GetOriginalData retrieves the original data frame, if any, set using WithOriginalData. +func GetOriginalData(ctx context.Context) ([]byte, bool) { + b, ok := ctx.Value(originalDataKey).([]byte) + return b, ok +} + +// ErrTooManyFrames is returned if more than one frame is given to the Sanitizer +const ErrTooManyFrames = strConstError("sanitizing multiple frames at once not supported") + +type strConstError string + +func (s strConstError) Error() string { return string(s) } + +type originalDataKeyStruct struct{} + +var originalDataKey = originalDataKeyStruct{} + // Sanitizer is an interface for sanitizing frames. Note that a sanitizer can only do -// its work correctly if frame actually only contains one frame within. +// its work correctly if only one single frame is given at a time. To chop a byte stream +// into frames, see the pkg/frame package. type Sanitizer interface { // Sanitize sanitizes the frame in a standardized way for the given - // FramingType. If the FramingType isn't known, the Sanitizer can choose between - // returning an ErrUnsupportedFramingType error or just returning frame, nil unmodified. - // If ErrUnsupportedFramingType is returned, the consumer won't probably be able to handle - // other framing types than the default ones, which might not be desired. + // stream.ContentType. 
If the stream.ContentType isn't known, the Sanitizer should + // return stream.UnsupportedContentTypeError. The consumer can use IfSupported() to + // just skip sanitation if the content type is not supported. If multiple frames are + // given, ErrTooManyFrames can be returned. // // The returned frame should have len == 0 if it's considered empty. Sanitize(ctx context.Context, ct stream.ContentType, frame []byte) ([]byte, error) + // The Sanitizer supports sanitizing one or many content types stream.ContentTypeSupporter } @@ -79,9 +137,9 @@ type jsonYAMLOptions struct { CopyComments *bool /* TODO: ForceMapKeyOrder that can either be - - PreserveOrder (default) => preserves the order from the prior if given. no-op if no prior. + - PreserveOrder (default) => preserves the order from the original if given. no-op if no original. - Alphabetic => sorts all keys alphabetically - - None => don't preserve order from the prior; no-op + - None => don't preserve order from the original; no-op */ } @@ -131,12 +189,10 @@ func (defaultSanitizer) SupportedContentTypes() stream.ContentTypes { return []stream.ContentType{stream.ContentTypeYAML, stream.ContentTypeJSON} } -var ErrTooManyFrames = errors.New("too many frames") - func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte, error) { - // Get prior data, if any (from the context), that we'll use to copy comments over and + // Get original data, if any (from the context), that we'll use to copy comments over and // infer the sequence indenting style. - priorData, hasPriorData := GetPriorData(ctx) + originalData, hasOriginalData := GetOriginalData(ctx) // Parse the current node frameNodes, err := (&kio.ByteReader{ @@ -154,32 +210,32 @@ func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte } frameNode := frameNodes[0] - if hasPriorData && s.opts.CopyComments != nil && *s.opts.CopyComments { - priorNode, err := yaml.Parse(string(priorData)) + if hasOriginalData && s.opts.CopyComments != nil && *s.opts.CopyComments { + originalNode, err := yaml.Parse(string(originalData)) if err != nil { return nil, err } // Copy comments over - if err := comments.CopyComments(priorNode, frameNode, true); err != nil { + if err := comments.CopyComments(originalNode, frameNode, true); err != nil { return nil, err } } return yaml.MarshalWithOptions(frameNode.Document(), &yaml.EncoderOptions{ - SeqIndent: s.resolveSeqStyle(frame, priorData, hasPriorData), + SeqIndent: s.resolveSeqStyle(frame, originalData, hasOriginalData), }) } -func (s *defaultSanitizer) resolveSeqStyle(frame, priorData []byte, hasPriorData bool) yaml.SequenceIndentStyle { +func (s *defaultSanitizer) resolveSeqStyle(frame, originalData []byte, hasOriginalData bool) yaml.SequenceIndentStyle { // If specified, use these; can be used as "force-formatting" directives for consistency if len(s.opts.ForceSeqIndentStyle) != 0 { return s.opts.ForceSeqIndentStyle } - // Otherwise, autodetect the indentation from prior data, if exists, or the current frame + // Otherwise, autodetect the indentation from original data, if exists, or the current frame // If the sequence style cannot be derived; the compact form will be used var deriveYAML string - if hasPriorData { - deriveYAML = string(priorData) + if hasOriginalData { + deriveYAML = string(originalData) } else { deriveYAML = string(frame) } @@ -204,28 +260,3 @@ func (s *defaultSanitizer) handleJSON(frame []byte) ([]byte, error) { // Trim all other spaces than an ending newline return 
append(bytes.TrimSpace(buf.Bytes()), '\n'), nil } - -func IfSupported(ctx context.Context, s Sanitizer, ct stream.ContentType, frame []byte) ([]byte, error) { - // If the content type isn't supported, nothing to do - if s == nil || !s.SupportedContentTypes().Has(ct) { - return frame, nil - } - return s.Sanitize(ctx, ct, frame) -} - -// WithPriorData registers the given frame with the context such that the frame can be used -// as "prior data" when sanitizing. Prior data can be used to copy over YAML comments -// automatically from the prior data, remember the key order, sequence indentation level, etc. -func WithPriorData(ctx context.Context, frame []byte) context.Context { - return context.WithValue(ctx, priorDataKey, frame) -} - -// GetPriorData retrieves the prior data frame, if any, set using WithPriorData. -func GetPriorData(ctx context.Context) ([]byte, bool) { - b, ok := ctx.Value(priorDataKey).([]byte) - return b, ok -} - -type priorDataKeyStruct struct{} - -var priorDataKey = priorDataKeyStruct{} diff --git a/pkg/frame/sanitize/sanitize_test.go b/pkg/frame/sanitize/sanitize_test.go index 30b4ce15..c9b8b4e3 100644 --- a/pkg/frame/sanitize/sanitize_test.go +++ b/pkg/frame/sanitize/sanitize_test.go @@ -407,7 +407,7 @@ foo: ctx := context.Background() s := NewJSONYAML(tt.opts...) if len(tt.prior) != 0 { - ctx = WithPriorData(ctx, []byte(tt.prior)) + ctx = WithOriginalData(ctx, []byte(tt.prior)) } got, err := s.Sanitize(ctx, tt.ct, []byte(tt.frame)) assert.Equal(t, tt.want, string(got)) @@ -458,3 +458,10 @@ func TestIfSupported(t *testing.T) { }) } } + +func TestJSON(t *testing.T) { + b, err := JSON(context.Background(), []byte(` { "foo" : true } `), nil) + assert.Nil(t, err) + assert.Equal(t, `{"foo":true} +`, string(b)) +} From 032d3d12562991b27c391440cea28f7560164336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 5 Aug 2021 13:46:01 +0300 Subject: [PATCH 19/19] go mod tidy; ignore the vendor directory, and improve the makefile slightly --- .gitignore | 4 +++- Makefile | 4 ++-- go.mod | 1 + 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index ae3df758..d0b36b7a 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,6 @@ .vscode/ # Binary artifacts -bin/ \ No newline at end of file +bin/ +# Don't check in the vendor directory +vendor diff --git a/Makefile b/Makefile index ffa1e793..f3e06041 100644 --- a/Makefile +++ b/Makefile @@ -33,13 +33,13 @@ docker-%: "golang:${GO_VERSION}" make $* test: docker-test-internal -test-internal: +test-internal: tidy-internal go test -v $(addsuffix /...,$(addprefix ./,${SRC_PKGS})) tidy: docker-tidy-internal tidy-internal: /go/bin/goimports go mod tidy - hack/generate-client.sh + if [ -d vendor ]; then go mod vendor; fi gofmt -s -w ${SRC_PKGS} goimports -w ${SRC_PKGS} diff --git a/go.mod b/go.mod index b306115c..a862f302 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/labstack/gommon v0.3.0 // indirect github.com/mattn/go-isatty v0.0.12 // indirect github.com/mitchellh/go-homedir v1.1.0 + github.com/pmezard/go-difflib v1.0.0 github.com/rjeczalik/notify v0.9.2 github.com/sirupsen/logrus v1.7.0 github.com/spf13/pflag v1.0.5
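The github.com/pmezard/go-difflib requirement added to go.mod above backs the doDiff helper in the new example test. For reference, a self-contained, hedged sketch of that call shape; the input strings here are illustrative, not taken from the repository.

package main

import (
	"fmt"
	"log"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	// Produce a unified diff between two text blobs, using the same call
	// shape as the example test's doDiff helper.
	diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
		A:        difflib.SplitLines("items:\n- item1\n- item2\n"),
		B:        difflib.SplitLines("items:\n- item1\n- item2\n- item3\n"),
		FromFile: "Expected",
		ToFile:   "Actual",
		Context:  1,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(diff)
}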